/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014-2025, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <arm.h>
#include <arm32_macros.S>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/thread_private_arch.h>
#include <mm/core_mmu.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

.arch_extension sec

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
	.asciz __FILE__

/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND(	.cantunwind)
	push	{ r4-r6, lr }
	mov	r4, r0
	mov	r5, r1
	bl	cpu_mmu_enabled
	cmp	r0, #0
	beq	1f
	mov	r0, r4
	bl	virt_to_phys
	cmp	r0, r4
	beq	1f
	/*
	 * This must be compliant with the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
	mov	r1, r5
	mov	r2, #0
	mov	r3, #0
	bl	__do_panic
	b	.		/* should NOT return */
1:	pop	{ r4-r6, pc }
END_FUNC __assert_flat_mapped_range

	/* Panic if MMU is enabled and vaddr != paddr (clobbers lr) */
	.macro assert_flat_mapped_range va, line
		ldr	r0, \va
		ldr	r1, =\line
		bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */

WEAK_FUNC plat_cpu_reset_early , :
	bx	lr
END_FUNC plat_cpu_reset_early
DECLARE_KEEP_PAGER plat_cpu_reset_early

	.section .identity_map, "ax"
	.align 5
LOCAL_FUNC reset_vect_table , : , .identity_map
	b	.
	b	.	/* Undef */
	b	.	/* Syscall */
	b	.	/* Prefetch abort */
	b	.	/* Data abort */
	b	.	/* Reserved */
	b	.	/* IRQ */
	b	.	/* FIQ */
END_FUNC reset_vect_table

	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
	str	r2, [r1, r0]
	dsb
	sev
#endif
	.endm

	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
	sev
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	1b
#endif
	.endm

	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
	sev
1:
	subs	r3, r3, #1
	beq	3f
	add	r0, r0, #4
2:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	2b
	b	1b
3:
#endif
	.endm

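	/*
	 * Together these macros implement the CFG_BOOT_SYNC_CPU boot
	 * handshake over the sem_cpu_sync[] word array. A rough C
	 * equivalent (a sketch, not the actual implementation):
	 *
	 * void cpu_is_ready(void)
	 * {
	 *	sem_cpu_sync[__get_core_pos()] = SEM_CPU_READY;
	 *	dsb();
	 *	sev();
	 * }
	 *
	 * void wait_primary(void)
	 * {
	 *	while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *		wfe();
	 * }
	 *
	 * void wait_secondary(void)
	 * {
	 *	size_t n;
	 *
	 *	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *		while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *			wfe();
	 * }
	 */
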
	/*
	 * set_sctlr : Set up some core configuration in CP15 SCTLR
	 *
	 * Setup required by the current implementation of the OP-TEE core:
	 * - Disable data and instruction caches.
	 * - MMU is expected off and exceptions trapped in ARM mode.
	 * - Enable or disable alignment checks depending on platform
	 *   configuration.
	 * - Optionally enable write-implies-execute-never.
	 * - Optionally enable a round-robin strategy for cache replacement.
	 *
	 * Clobbers r0.
	 */
	.macro set_sctlr
		read_sctlr r0
		bic	r0, r0, #(SCTLR_M | SCTLR_C)
		bic	r0, r0, #SCTLR_I
		bic	r0, r0, #SCTLR_TE
		orr	r0, r0, #SCTLR_SPAN
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	r0, r0, #SCTLR_A
#else
		bic	r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
		orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
		orr	r0, r0, #SCTLR_RR
#endif
		write_sctlr r0
	.endm

	.macro maybe_init_spectre_workaround
#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
	read_midr r0
	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r1, #MIDR_IMPLEMENTER_ARM
	bne	1f
	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r2, #CORTEX_A8_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
	beq	2f

	movw	r2, #CORTEX_A15_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
	bne	1f	/* Skip it for all other CPUs */
2:
	read_actlr r0
	orr	r0, r0, r2
	write_actlr r0
	isb
1:
#endif
	.endm

FUNC _start , :
UNWIND(	.cantunwind)
	/*
	 * Temporary copy of boot argument registers, will be passed to
	 * boot_save_args() further down.
	 */
	mov	r4, r0
	mov	r5, r1
	mov	r6, r2
	mov	r7, r3
	mov	r8, lr

	/*
	 * The 32-bit entry point is expected to execute in Supervisor
	 * mode, but some bootloaders may enter it in Supervisor or
	 * Monitor mode.
	 */
	cps	#CPSR_MODE_SVC

	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early
	maybe_init_spectre_workaround

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	b	reset_primary
#else
	bl	__get_core_pos
	cmp	r0, #0
	beq	reset_primary
	b	reset_secondary
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	/*
	 * Setup sp to point to the top of the tmp stack for the current CPU:
	 * sp is assigned:
	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	r0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu
		add	r0, r0, #1

		/* r2 = stack_tmp - STACK_TMP_GUARD */
		adr	r3, stack_tmp_rel
		ldr	r2, [r3]
		add	r2, r2, r3

		/*
		 * stack_tmp_stride and stack_tmp_stride_rel are the
		 * equivalent of:
		 * extern const u32 stack_tmp_stride;
		 * u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
		 *			      (u32)&stack_tmp_stride_rel
		 *
		 * To load the value of stack_tmp_stride we do the equivalent
		 * of:
		 * *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
		 */
		adr	r3, stack_tmp_stride_rel
		ldr	r1, [r3]
		ldr	r1, [r1, r3]

		/*
		 * r0 is core pos + 1
		 * r1 is value of stack_tmp_stride
		 * r2 is value of stack_tmp - STACK_TMP_GUARD
		 */
		mul	r1, r0, r1
		add	sp, r1, r2
	.endm

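	/*
	 * In C terms set_sp amounts to (a sketch using the names above):
	 *
	 * sp = (vaddr_t)stack_tmp - STACK_TMP_GUARD +
	 *	(__get_core_pos() + 1) * stack_tmp_stride;
	 *
	 * computed with PC-relative loads only so that it also works
	 * before any relocation has been applied.
	 */
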
	/*
	 * Cache maintenance during entry: handle the outer cache.
	 * The end address is exclusive: the first byte not to be changed.
	 * Note however that arm_clX_inv/cleanbyva operate on full cache
	 * lines.
	 *
	 * Use an ANSI #define to capture the source file line number for
	 * the PL310 assertion.
	 */
	.macro __inval_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
	bl	pl310_base
	ldr	r1, \vbase
	ldr	r2, =\vend
	ldr	r2, [r2]
	bl	arm_cl2_invbypa
#endif
	ldr	r0, \vbase
	ldr	r1, =\vend
	ldr	r1, [r1]
	sub	r1, r1, r0
	bl	dcache_inv_range
	.endm

	.macro __flush_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
	ldr	r0, \vbase
	ldr	r1, =\vend
	ldr	r1, [r1]
	sub	r1, r1, r0
	bl	dcache_clean_range
	bl	pl310_base
	ldr	r1, \vbase
	ldr	r2, =\vend
	ldr	r2, [r2]
	bl	arm_cl2_cleaninvbypa
#endif
	ldr	r0, \vbase
	ldr	r1, =\vend
	ldr	r1, [r1]
	sub	r1, r1, r0
	bl	dcache_cleaninv_range
	.endm

#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange vbase, vend, __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange vbase, vend, __LINE__

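/*
 * Roughly, in C (a sketch; vbase/vend name words holding the range
 * bounds and the end address is exclusive):
 *
 * void inval_cache_vrange(vaddr_t *vbase, vaddr_t *vend)
 * {
 *	if (IS_ENABLED(CFG_PL310) && !IS_ENABLED(CFG_PL310_SIP_PROTOCOL))
 *		arm_cl2_invbypa(pl310_base(), *vbase, *vend);
 *	dcache_inv_range((void *)*vbase, *vend - *vbase);
 * }
 *
 * flush_cache_vrange() is analogous: it cleans the inner cache first,
 * then cleans and invalidates the PL310 by physical address and
 * finally cleans and invalidates the inner cache range.
 */
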
#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
#else
#define flush_cpu_semaphores
#endif

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)

	/* preserve r4-r8: bootargs */

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, first uint32_t tells the length of the data
	 */
	ldr	r0, =__init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__init_end
	sub	r2, r2, r0		/* init len */
	ldr	r12, [r1, r2]		/* length of hashes etc */
	add	r2, r2, r12		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2		/* __init_start + len */
	add	r1, r1, r2		/* __data_end + len */
	ldr	r3, =boot_cached_mem_end
	str	r0, [r3]
	ldr	r2, =__init_start
copy_init:
	ldmdb	r1!, {r3, r9-r12}
	stmdb	r0!, {r3, r9-r12}
	cmp	r0, r2
	bgt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to right before
	 * __vcore_free_end, the first uint32_t tells the length of the
	 * struct + data
	 */
	ldr	r1, =__data_end		/* src */
	ldr	r2, [r1]		/* struct boot_embdata::total_len */
	/* dst */
	ldr	r0, =__vcore_free_end
	sub	r0, r0, r2
	/* round down to beginning of page */
	mov	r3, #(SMALL_PAGE_SIZE - 1)
	bic	r0, r0, r3
	ldr	r3, =boot_embdata_ptr
	str	r0, [r3]
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r1, r1, r2
	add	r2, r0, r2
	ldr	r3, =boot_cached_mem_end
	str	r2, [r3]

copy_init:
	ldmdb	r1!, {r3, r9-r12}
	stmdb	r2!, {r3, r9-r12}
	cmp	r2, r0
	bgt	copy_init
#endif
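
	/*
	 * Either way, the loop above is in effect a memmove() of the
	 * trailing embedded boot data (and, with the pager, the init
	 * code) to its final location: 20 bytes per iteration, copied
	 * with descending addresses so that overlapping source and
	 * destination ranges are safe.
	 */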

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * start/end of .bss at least 8 byte aligned.
	 */
	ldr	r0, =__bss_start
	ldr	r1, =__bss_end
	mov	r2, #0
	mov	r3, #0
clear_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker keeping
	 * start/end of .nex_bss at least 8 byte aligned.
	 */
	ldr	r0, =__nex_bss_start
	ldr	r1, =__nex_bss_end
	mov	r2, #0
	mov	r3, #0
clear_nex_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_nex_bss
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* First initialize the entire shadow area with no access */
	ldr	r0, =__asan_shadow_start	/* start */
	ldr	r1, =__asan_shadow_end	/* limit */
	mov	r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
	str	r2, [r0], #4
	cmp	r0, r1
	bls	shadow_no_access

#if !defined(CFG_DYN_CONFIG)
	/* Mark the entire stack area as OK */
	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
	ldr	r0, =__nozi_stack_start	/* start */
	lsr	r0, r0, #ASAN_BLOCK_SHIFT
	add	r0, r0, r2
	ldr	r1, =__nozi_stack_end	/* limit */
	lsr	r1, r1, #ASAN_BLOCK_SHIFT
	add	r1, r1, r2
	mov	r2, #0
shadow_stack_access_ok:
	strb	r2, [r0], #1
	cmp	r0, r1
	bls	shadow_stack_access_ok
#endif
#endif
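
	/*
	 * The shadow mapping used above follows the usual ASan scheme,
	 * one shadow byte per ASAN_BLOCK_SIZE bytes (a sketch):
	 *
	 * int8_t *shadow = (int8_t *)CFG_ASAN_SHADOW_OFFSET +
	 *		    (addr >> ASAN_BLOCK_SHIFT);
	 *
	 * A shadow byte of 0 marks the block as fully accessible,
	 * ASAN_DATA_RED_ZONE marks it as off limits.
	 */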

#if defined(CFG_DYN_CONFIG)
	ldr	r0, =boot_embdata_ptr
	ldr	r0, [r0]
	sub	r1, r0, #THREAD_BOOT_INIT_TMP_ALLOC

	/* Clear the allocated struct thread_core_local */
	add	r2, r1, #THREAD_CORE_LOCAL_SIZE
	mov	r3, #0
1:	str	r3, [r2, #-4]!
	cmp	r2, r1
	bgt	1b

	sub	r0, r0, #(__STACK_TMP_OFFS + __STACK_CANARY_SIZE)
	mov	r2, #THREAD_ID_INVALID
	str	r2, [r1, #THREAD_CORE_LOCAL_CURR_THREAD]
	mov	r2, #THREAD_CLF_TMP
	str	r2, [r1, #THREAD_CORE_LOCAL_FLAGS]
	str	r0, [r1, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	add	r2, r1, #(THREAD_BOOT_INIT_TMP_ALLOC / 2)
	sub	r2, r2, #__STACK_CANARY_SIZE
	str	r2, [r1, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	mov	sp, r0
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	cps	#CPSR_MODE_ABT
	mov	sp, r1
	cps	#CPSR_MODE_UND
	mov	sp, r1
	cps	#CPSR_MODE_SVC
	/*
	 * Record a single core, to be changed later before secure world
	 * boot is done.
	 */
	ldr	r2, =thread_core_local
	str	r1, [r2]
	ldr	r2, =thread_core_count
	mov	r0, #1
	str	r0, [r2]
#else
	set_sp

	/* Initialize thread_core_local[current_cpu_id] for early boot */
	bl	thread_get_core_local
	push	{r0,r1}
	bl	thread_get_abt_stack
	pop	{r1,r2}
	mov	r3, sp

	cps	#CPSR_MODE_IRQ
	mov	sp, r3
	cps	#CPSR_MODE_FIQ
	mov	sp, r3
	cps	#CPSR_MODE_ABT
	mov	sp, r1
	cps	#CPSR_MODE_UND
	mov	sp, r1
	cps	#CPSR_MODE_SVC

	str	sp, [r1, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	str	r0, [r1, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	mov	r0, #THREAD_ID_INVALID
	str	r0, [r1, #THREAD_CORE_LOCAL_CURR_THREAD]
	mov	r0, #THREAD_CLF_TMP
	str	r0, [r1, #THREAD_CORE_LOCAL_FLAGS]
#endif

	/* complete ARM secure MP common configuration */
	bl	plat_primary_init_early

	/* Enable Console */
	bl	console_init

	mov	r0, r8
	mov	r1, #0
	push	{r0, r1}
	mov	r0, r4
	mov	r1, r5
	mov	r2, r6
	mov	r3, r7
	bl	boot_save_args
	add	sp, sp, #(2 * 4)

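	/*
	 * The sequence above amounts to an AAPCS call of
	 * boot_save_args(r4, r5, r6, r7, r8): the first four saved boot
	 * arguments go in r0-r3 and the saved lr is passed as the fifth
	 * argument on the stack, padded with a zero word to keep 8-byte
	 * stack alignment.
	 */
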
#ifdef CFG_WITH_PAGER
	ldr	r0, =__init_end	/* pointer to boot_embdata */
	ldr	r1, [r0]	/* struct boot_embdata::total_len */
	add	r0, r0, r1
	mov_imm	r1, 0xfff
	add	r0, r0, r1	/* round up */
	bic	r0, r0, r1	/* to next page */
	mov_imm r1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
	mov	r2, r1
#else
	ldr	r0, =__vcore_free_start
	ldr	r1, =boot_embdata_ptr
	ldr	r1, [r1]
#ifdef CFG_DYN_CONFIG
	sub	r1, r1, #THREAD_BOOT_INIT_TMP_ALLOC
#endif
	ldr	r2, =__vcore_free_end
#endif
	bl	boot_mem_init

#ifdef CFG_PL310
	bl	pl310_base
	bl	arm_cl2_config
#endif

	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	inval_cache_vrange(cached_mem_start, boot_cached_mem_end)

#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	/* Enable PL310 if not yet enabled */
	bl	pl310_base
	bl	arm_cl2_enable
#endif

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =thread_core_local
	ldr	r0, [r0]
	ldr	r1, =thread_core_local_pa
	str	r0, [r1]
#endif

#ifdef CFG_CORE_ASLR
	bl	get_aslr_seed
#ifdef CFG_CORE_ASLR_SEED
	mov_imm	r0, CFG_CORE_ASLR_SEED
#endif
#else
	mov	r0, #0
#endif

	ldr	r1, =boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Save a pointer to thread_core_local[core_pos] since we can't
	 * call thread_get_core_local() again before the recorded end_va's
	 * have been updated below.
	 */
	bl	thread_get_core_local
	mov	r4, r0

	/*
	 * Process relocation information for updating with the virtual map
	 * offset.  We're doing this now before MMU is enabled as some of
	 * the memory will become write protected.
	 */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	/*
	 * Update boot_cached_mem_end address with load offset since it was
	 * calculated before relocation.
	 */
	ldr	r3, =boot_cached_mem_end
	ldr	r2, [r3]
	add	r2, r2, r0
	str	r2, [r3]

	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Update recorded end_va, we depend on r4 pointing to the
	 * pre-relocated thread_core_local[core_pos].
	 *
	 * This must be done before calling into C code to make sure that
	 * the stack pointer matches what we have in thread_core_local[].
	 */
	ldr	r1, =boot_mmu_config
	ldr	r1, [r1, #CORE_MMU_CONFIG_MAP_OFFSET]
#if defined(CFG_DYN_CONFIG)
	ldr	r0, =thread_core_local
	add	r2, r4, r1
	str	r2, [r0]
#endif
	add	r4, r4, r1
	ldr	r0, [r4, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	add	r0, r0, r1
	str	r0, [r4, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	ldr	r0, [r4, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	add	r0, r0, r1
	str	r0, [r4, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]

	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	cps	#CPSR_MODE_ABT
	mov	sp, r4
	cps	#CPSR_MODE_UND
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	/* Update relocations recorded with boot_mem_add_reloc() */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	bl	boot_mem_relocate
	/*
	 * Reinitialize console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize partition tables for each partition to
	 * default_partition, which has now been relocated to a different
	 * VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	bl	boot_init_primary_early
	bl	boot_init_primary_late

#if defined(CFG_DYN_CONFIG)
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Update thread_core_local_pa with a new physical address */
	ldr	r0, =__thread_core_local_new
	ldr	r0, [r0]
	bl	virt_to_phys
	ldr	r1, =thread_core_local_pa
	str	r0, [r1]
#endif
	bl	__get_core_pos

	/*
	 * Switch to the new thread_core_local and thread_core_count and
	 * keep the pointer to the new thread_core_local in r1.
	 */
	ldr	r1, =__thread_core_count_new
	ldr	r1, [r1]
	ldr	r2, =thread_core_count
	str	r1, [r2]
	ldr	r1, =__thread_core_local_new
	ldr	r1, [r1]
	ldr	r2, =thread_core_local
	str	r1, [r2]

	/*
	 * Update to use the new stacks and thread_core_local. Clear
	 * thread_core_local[0].stackcheck_recursion now that the stack
	 * pointer matches recorded information.
	 */
	mov	r2, #THREAD_CORE_LOCAL_SIZE
	/* r3 = r2 * r0 + r1 */
	mla	r3, r2, r0, r1
	ldr	r0, [r3, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	mov	sp, r0
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	cps	#CPSR_MODE_ABT
	mov	sp, r3
	cps	#CPSR_MODE_UND
	mov	sp, r3
	cps	#CPSR_MODE_SVC
#endif

#ifndef CFG_NS_VIRTUALIZATION
	mov	r9, sp
	ldr	r0, =threads
	ldr	r0, [r0]
	ldr	r0, [r0, #THREAD_CTX_STACK_VA_END]
	mov	sp, r0
	bl	thread_get_core_local
	mov	r8, r0
	mov	r0, #0
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
#endif
	bl	boot_init_primary_runtime
	bl	boot_init_primary_final
#ifndef CFG_NS_VIRTUALIZATION
	mov	r0, #THREAD_CLF_TMP
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, r9
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
	sub	sp, sp, #0x8
	mov	r0, sp
	mov	r1, #1
	mov	r2, #0x4
	bl	plat_get_random_stack_canaries
	ldr	r0, [sp]
	ldr	r1, =__stack_chk_guard
	str	r0, [r1]
	add	sp, sp, #0x8
#endif
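
	/*
	 * Assuming the prototype used by the generic stack protector
	 * support:
	 * plat_get_random_stack_canaries(void *buf, size_t ncan,
	 *				  size_t size)
	 * the call above generates one 4-byte canary into the stack
	 * buffer and copies it into __stack_chk_guard.
	 */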
75745507d10SKhoa Hoang
75865401337SJens Wiklander	/*
75965401337SJens Wiklander	 * In case we've touched memory that secondary CPUs will use before
76065401337SJens Wiklander	 * they have turned on their D-cache, clean and invalidate the
76165401337SJens Wiklander	 * D-cache before exiting to normal world.
76265401337SJens Wiklander	 */
7635727b6afSJens Wiklander	flush_cache_vrange(cached_mem_start, boot_cached_mem_end)
76465401337SJens Wiklander
76565401337SJens Wiklander	/* release secondary boot cores and sync with them */
76665401337SJens Wiklander	cpu_is_ready
76765401337SJens Wiklander	flush_cpu_semaphores
76865401337SJens Wiklander	wait_secondary
76965401337SJens Wiklander
77065401337SJens Wiklander#ifdef CFG_PL310_LOCKED
77165401337SJens Wiklander#ifdef CFG_PL310_SIP_PROTOCOL
77265401337SJens Wiklander#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
77365401337SJens Wiklander#endif
77465401337SJens Wiklander	/* lock/invalidate all lines: pl310 behaves as if disable */
77565401337SJens Wiklander	bl	pl310_base
77665401337SJens Wiklander	bl	arm_cl2_lockallways
77765401337SJens Wiklander	bl	pl310_base
77865401337SJens Wiklander	bl	arm_cl2_cleaninvbyway
77965401337SJens Wiklander#endif
78065401337SJens Wiklander
	/*
	 * Clear current thread id now to allow the thread to be reused on
	 * next entry. Matches thread_init_boot_thread() in boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	ldr	r0, =cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	r0, r0, r1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else /* CFG_CORE_FFA */

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	ldr	r1, =thread_vector_table
	/* Pass the vector address returned from main_init */
	sub	r1, r1, r0
#else
	/* Initialize secure monitor */
	add	r0, sp, #__STACK_TMP_OFFS
	bl	sm_init
	ldr	r0, =boot_arg_nsec_entry
	ldr	r0, [r0]
	bl	init_sec_mon

	/* Relay standard bootarg #1 and #2 to non-secure entry */
	mov	r4, #0
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r5		/* std bootarg #1 for register R1 */
	mov	r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif /* CFG_CORE_FFA */
END_FUNC reset_primary

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

LOCAL_DATA cached_mem_start , :
	.word	__text_start
END_DATA cached_mem_start

#ifndef CFG_WITH_PAGER
LOCAL_DATA boot_embdata_ptr , :
	.skip	4
END_DATA boot_embdata_ptr
#endif

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	push	{r4-r5}
	/* r0 holds load offset */
#ifdef CFG_WITH_PAGER
	ldr	r12, =__init_end
#else
	ldr	r12, =boot_embdata_ptr
	ldr	r12, [r12]
#endif
	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]

	mov_imm	r1, TEE_LOAD_ADDR
	add	r2, r2, r12	/* start of relocations */
	add	r3, r3, r2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rel32, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py
	 *
	 * All the R_ARM_RELATIVE relocations are translated into a list
	 * of 32-bit offsets from TEE_LOAD_ADDR. At each such offset is a
	 * 32-bit value which is increased with the load offset.
	 */
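
	/*
	 * A C sketch of the loop below, with reloc entries as produced
	 * by get_reloc_bin():
	 *
	 * for (uint32_t *p = reloc_start; p < reloc_end; p++)
	 *	*(uint32_t *)(TEE_LOAD_ADDR + *p) += load_offset;
	 */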

#ifdef CFG_WITH_PAGER
	/*
	 * With pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	r12, r12, r1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	r4, [r2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	r4, r12
	bge	2f
#endif
	ldr	r5, [r4, r1]
	add	r5, r5, r0
	str	r5, [r4, r1]

2:	cmp	r2, r3
	bne	1b

	pop	{r4-r5}
	bx	lr
END_FUNC relocate
#endif

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has
 * been enabled the instruction pointer is updated to execute at the new
 * offset instead. Stack pointers and the return address are updated.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	/* r0 = core pos */
	adr	r1, boot_mmu_config

#ifdef CFG_WITH_LPAE
	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbcr
	 * r3 = mair0
	 */
	write_ttbcr r2
	write_mair0 r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbr0_base
	 * r3 = ttbr0_core_offset
	 */

	/*
	 * ttbr0_el1 = ttbr0_base + ttbr0_core_offset * core_pos
	 */
	mla	r12, r0, r3, r2
	mov	r0, #0
	write_ttbr0_64bit r12, r0
	write_ttbr1_64bit r0, r0
#else
	ldm	r1!, {r2, r3}
	/*
	 * r2 = prrr
	 * r3 = nmrr
	 */
	write_prrr r2
	write_nmrr r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = dacr
	 * r3 = ttbcr
	 */
	write_dacr r2
	write_ttbcr r3

	ldm	r1!, {r2}
	/* r2 = ttbr */
	write_ttbr0 r2
	write_ttbr1 r2

	mov	r2, #0
	write_contextidr r2
#endif
	ldm	r1!, {r2}
	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
	isb

	/* Invalidate TLB */
	write_tlbiall

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	read_sctlr r0
	orr	r0, r0, #SCTLR_M
#ifndef CFG_WITH_LPAE
	/* Enable Access flag (simplified access permissions) and TEX remap */
	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
#endif
	write_sctlr r0
	isb

	/* Update vbar */
	read_vbar r1
	add	r1, r1, r2
	write_vbar r1
	isb

	/* Invalidate instruction cache and branch predictor */
	write_iciallu
	write_bpiall
	isb

	read_sctlr r0
	/* Enable I and D cache */
	orr	r0, r0, #SCTLR_I
	orr	r0, r0, #SCTLR_C
#if defined(CFG_ENABLE_SCTLR_Z)
	/*
	 * This is only needed on the ARMv7 architecture and is hence
	 * conditioned by the configuration directive CFG_ENABLE_SCTLR_Z.
	 * For more recent architectures, program flow prediction is
	 * automatically enabled upon MMU enablement.
	 */
	orr	r0, r0, #SCTLR_Z
#endif
	write_sctlr r0
	isb

	/* Adjust stack pointer and return address */
	add	sp, sp, r2
	add	lr, lr, r2

	bx	lr
END_FUNC enable_mmu

#if !defined(CFG_DYN_CONFIG)
LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

LOCAL_DATA stack_tmp_stride_rel , :
	.word	stack_tmp_stride - stack_tmp_stride_rel
END_DATA stack_tmp_stride_rel
#endif

DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config
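
/*
 * Layout consumed by enable_mmu() above (a sketch, the struct
 * core_mmu_config definition pulled in via <mm/core_mmu.h> is
 * authoritative):
 * - with CFG_WITH_LPAE: ttbcr, mair0, ttbr0_base, ttbr0_core_offset
 *   and finally the map offset
 * - without LPAE: prrr, nmrr, dacr, ttbcr, ttbr and finally the map
 *   offset
 */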

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , : , .identity_map
UNWIND(	.cantunwind)
	mov	r4, r0
	mov	r5, r1
	mov	r6, lr

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

	mov	r4, lr

	bl	__get_core_pos
	bl	enable_mmu

	/*
	 * Use the stacks from thread_core_local.
	 */
	bl	__get_core_pos
	ldr	r1, =thread_core_local
	ldr	r1, [r1]
	mov	r2, #THREAD_CORE_LOCAL_SIZE
	/* r3 = r2 * r0 + r1 */
	mla	r3, r2, r0, r1
	ldr	r0, [r3, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	mov	sp, r0
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	cps	#CPSR_MODE_ABT
	mov	sp, r3
	cps	#CPSR_MODE_UND
	mov	sp, r3
	cps	#CPSR_MODE_SVC

	mov	r0, r4
	mov	r1, r5
	bl	boot_cpu_on_handler
#ifdef CFG_CORE_FFA
	b	thread_ffa_msg_wait
#else
	bx	r6
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	ldr	r0, =reset_vect_table
	write_vbar r0

	wait_primary

	/*
	 * Initialize stack pointer from thread_core_local, compensate for
	 * ASLR if enabled.
	 */
#ifdef CFG_CORE_ASLR
	ldr	r4, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
#endif
	bl	__get_core_pos
	ldr	r1, =thread_core_local_pa
#ifdef CFG_CORE_ASLR
	sub	r1, r1, r4
#endif
	ldr	r1, [r1]
	mov	r2, #THREAD_CORE_LOCAL_SIZE
	/* r3 = r2 * r0 + r1 */
	mla	r3, r2, r0, r1
	ldr	r0, [r3, #THREAD_CORE_LOCAL_TMP_STACK_PA_END]
	mov	sp, r0

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/* if L1 is not invalidated before, do it here */
	mov	r0, #DCACHE_OP_INV
	bl	dcache_op_level1
#endif

	bl	__get_core_pos
	bl	enable_mmu

	/*
	 * Use the stacks from thread_core_local.
	 */
	bl	__get_core_pos
	ldr	r1, =thread_core_local
	ldr	r1, [r1]
	mov	r2, #THREAD_CORE_LOCAL_SIZE
	/* r3 = r2 * r0 + r1 */
	mla	r3, r2, r0, r1
	ldr	r0, [r3, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	mov	sp, r0
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	cps	#CPSR_MODE_ABT
	mov	sp, r3
	cps	#CPSR_MODE_UND
	mov	sp, r3
	cps	#CPSR_MODE_SVC

	cpu_is_ready

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/*
	 * boot_core_hpen() return value (r0) is address of
	 * ns entry context structure
	 */
	bl	boot_core_hpen
	ldm	r0, {r0, r6}
	mov	r8, r0
#else
	mov	r6, #0
#endif
	bl	boot_init_secondary

	/* Initialize secure monitor */
	add	r0, sp, #__STACK_TMP_OFFS
	bl	sm_init
	mov	r0, r8		/* ns-entry address */
	bl	init_sec_mon

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	mov	r1, r6
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC reset_secondary
DECLARE_KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */