/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2023 Andes Technology Corporation
 * Copyright 2022-2023 NXP
 */

#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/thread_private_arch.h>
#include <mm/core_mmu.h>
#include <platform_config.h>
#include <riscv.h>
#include <riscv_macros.S>
#include <tee/optee_abi.h>
#include <tee/teeabi_opteed.h>
#include <tee/teeabi_opteed_macros.h>

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

	/*
	 * Set up sp to point to the top of the tmp stack for the current
	 * CPU. sp is assigned:
	 * stack_tmp + (hart_index + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 */
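	/*
	 * Worked example (hypothetical values): with stack_tmp_stride =
	 * 0x2000 and hart_index = 1, sp becomes
	 * stack_tmp + 2 * 0x2000 - STACK_TMP_GUARD.
	 */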
.macro set_sp
	/* Unsupported CPU, park it before it breaks something */
	li	t1, CFG_TEE_CORE_NB_CORE
	csrr	t0, CSR_XSCRATCH /* t0: hart_index */
	bge	t0, t1, unhandled_cpu
	addi	t0, t0, 1
	lw	t1, stack_tmp_stride
	mul	t1, t0, t1	/* t1: (hart_index + 1) * stack_tmp_stride */
	la	t2, stack_tmp_rel
	lw	t0, 0(t2)
	add	t0, t0, t2	/* t0: stack_tmp - STACK_TMP_GUARD */
	add	sp, t1, t0
.endm

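/*
 * Mark this hart's entry in sem_cpu_sync[] as SEM_CPU_READY so that
 * harts spinning in wait_primary or wait_secondary can proceed.
 */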
.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	csrr	t0, CSR_XSCRATCH
	la	t1, sem_cpu_sync
	slli	t0, t0, 2
	add	t1, t1, t0
	li	t2, SEM_CPU_READY
	sw	t2, 0(t1)
	fence
#endif
.endm

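/*
 * Point tp at this hart's thread_core_local entry and record the hart
 * ID (from a0) and the hart index there.
 */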
.macro set_tp
	csrr	a3, CSR_XSCRATCH /* a3: hart_index */
	li	a1, THREAD_CORE_LOCAL_SIZE
	la	tp, thread_core_local
	LDR	tp, 0(tp)
	mul	a2, a1, a3
	add	tp, tp, a2
	sw	a0, THREAD_CORE_LOCAL_HART_ID(tp)
	sw	a3, THREAD_CORE_LOCAL_HART_INDEX(tp)
.endm

.macro set_satp
	/*
	 * a0 = hart_index
	 * a1 = address of boot_mmu_config.satp[0]
	 * a2 = size of CSR SATP
	 *
	 * This hart's SATP value is loaded from (a1 + a0 * a2).
	 */
	csrr	a0, CSR_XSCRATCH
	la	a1, boot_mmu_config
	addi	a1, a1, CORE_MMU_CONFIG_SATP
	li	a2, CORE_MMU_CONFIG_SATP_SIZE
	mul	a0, a0, a2
	add	a1, a1, a0
	LDR	a2, 0(a1)
	csrw	CSR_SATP, a2
	sfence.vma	zero, zero
.endm

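/*
 * Secondary harts spin here until the primary hart (hart_index 0) has
 * marked its sem_cpu_sync entry as SEM_CPU_READY.
 */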
.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	la	t0, sem_cpu_sync
	li	t2, SEM_CPU_READY
1:
	fence	w, w
	lw	t1, 0(t0)
	bne	t1, t2, 1b
#endif
.endm

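/*
 * The primary hart spins here until every secondary hart has marked
 * its sem_cpu_sync entry as SEM_CPU_READY.
 */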
.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	la	t0, sem_cpu_sync
	li	t1, CFG_TEE_CORE_NB_CORE
	li	t2, SEM_CPU_READY
1:
	addi	t1, t1, -1
	beqz	t1, 3f
	addi	t0, t0, 4
2:
	fence
	lw	t1, 0(t0)
	bne	t1, t2, 2b
	j	1b
3:
#endif
.endm

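/* Make the sem_cpu_sync updates visible to the other harts */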
#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		la	t0, sem_cpu_sync_start; \
		la	t1, sem_cpu_sync_end; \
		fence
#else
#define flush_cpu_semaphores
#endif

FUNC _start , :
	/*
	 * Register usage:
	 * a0	- if non-NULL holds the hart ID
	 * a1	- if non-NULL holds the system DTB address
	 *
	 * s1 - saved a1
	 */
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
#ifdef CFG_RISCV_M_MODE
	csrr	a0, CSR_MHARTID
#endif

#if defined(CFG_DT_ADDR)
	li	s1, CFG_DT_ADDR
#else
	mv	s1, a1		/* Save device tree address into s1 */
#endif
	/*
	 * Only the hart that wins the lottery runs the primary boot
	 * sequence. amoadd.w returns the previous value, so the winner
	 * reads 0 and every other hart reads a non-zero value.
	 */
	la	a3, hart_lottery
	li	a2, 1
	amoadd.w a3, a2, (a3)
	/* The value read from hart_lottery also serves as the hart_index */
	csrw	CSR_XSCRATCH, a3

	bnez	a3, reset_secondary
	jal	reset_primary
	j	.
END_FUNC _start

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)
	/*
	 * Zero bss
	 */
	lla	t0, __bss_start
	lla	t1, __bss_end
	beq	t0, t1, 1f
0:
	STR	zero, (t0)
	add	t0, t0, RISCV_XLEN_BYTES
	bne	t0, t1, 0b
1:
#ifdef CFG_RISCV_S_MODE
	lla	t0, _start
	lla	t1, start_addr
	STR	t0, (t1)
#endif

	csrw	CSR_SATP, zero	/* Disable address translation (Bare mode) */

	/* Set up sp and tp */
#if defined(CFG_DYN_CONFIG)
	/*
	 * Point sp to a temporary stack at the end of mapped core memory.
	 * Point tp to a temporary struct thread_core_local before the
	 * temporary stack.
	 */
	la	t0, __vcore_free_end
	li	t1, THREAD_BOOT_INIT_TMP_ALLOC
	sub	t1, t0, t1

	/* Clear the allocated struct thread_core_local */
	add	t2, t1, THREAD_CORE_LOCAL_SIZE
1:	addi	t2, t2, -RISCV_XLEN_BYTES
	STR	zero, (t2)
	bgt	t2, t1, 1b

	li	t2, THREAD_ID_INVALID
	sh	t2, THREAD_CORE_LOCAL_CURR_THREAD(t1)
	li	t2, THREAD_CLF_TMP
	sw	t2, THREAD_CORE_LOCAL_FLAGS(t1)
	/* The tmp stack ends half a canary below the end of free memory */
	li	t2, (__STACK_CANARY_SIZE / 2)
	sub	t0, t0, t2
	STR	t0, THREAD_CORE_LOCAL_TMP_STACK_VA_END(t1)
	/* The abort stack ends THREAD_BOOT_INIT_TMP_ALLOC / 2 lower */
	li	t2, (THREAD_BOOT_INIT_TMP_ALLOC / 2)
	sub	t2, t0, t2
	STR	t2, THREAD_CORE_LOCAL_ABT_STACK_VA_END(t1)
	csrr	t2, CSR_XSCRATCH /* t2: hart_index */
	sw	a0, THREAD_CORE_LOCAL_HART_ID(t1)
	sw	t2, THREAD_CORE_LOCAL_HART_INDEX(t1)

	mv	sp, t0
	mv	tp, t1
	/*
	 * Record a single core, to be changed later before secure world
	 * boot is done.
	 */
	la	t2, thread_core_local
	STR	tp, 0(t2)
	la	t2, thread_core_count
	li	t0, 1
	STR	t0, 0(t2)
#else
	set_sp
	set_tp

	/* Initialize thread_core_local[hart_index] for early boot */
	jal	thread_get_abt_stack
	mv	a1, sp
	STR	a1, THREAD_CORE_LOCAL_TMP_STACK_VA_END(tp)
	STR	a0, THREAD_CORE_LOCAL_ABT_STACK_VA_END(tp)
	li	a0, THREAD_ID_INVALID
	sh	a0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
	li	a0, THREAD_CLF_TMP
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
#endif

	jal	plat_primary_init_early
	jal	console_init

	/*
	 * Call boot_mem_init() with a0 = start of the free core memory
	 * range, a1 = current end of that range (with CFG_DYN_CONFIG the
	 * temporary allocation at the top is excluded) and a2 = absolute
	 * end of the range.
	 */
	la	a0, __vcore_free_start
	la	a1, __vcore_free_end
#ifdef CFG_DYN_CONFIG
	li	a2, THREAD_BOOT_INIT_TMP_ALLOC
	sub	a1, a1, a2
#endif
	la	a2, __vcore_free_end
	jal	boot_mem_init

	mv	a0, x0
	la	a1, boot_mmu_config
	jal	core_init_mmu_map

	set_satp

#ifdef CFG_CORE_ASLR
#if defined(CFG_DYN_CONFIG)
	/*
	 * thread_core_local holds only one core and thread_core_count is
	 * 1, so tp holds the relocated address that thread_core_local
	 * must be updated with.
	 */
	la	t0, thread_core_local
	STR	tp, 0(t0)
#endif
#endif

	jal	boot_init_primary_early

	mv	a0, s1		/* s1 contains saved device tree address */
	mv	a1, x0		/* unused */
	jal	boot_init_primary_late

#if defined(CFG_DYN_CONFIG)
	/* Get hart index */
	jal	__get_core_pos

	/*
	 * Switch to the new thread_core_local and thread_core_count and
	 * keep the pointer to the new thread_core_local in a1.
	 */
	LDR	a1, __thread_core_count_new
	la	a2, thread_core_count
	STR	a1, 0(a2)
	LDR	a1, __thread_core_local_new
	la	a2, thread_core_local
	STR	a1, 0(a2)

	/*
	 * Update tp to point to the new thread_core_local.
	 * Update sp to use the new tmp stack.
	 */
	li	a2, THREAD_CORE_LOCAL_SIZE
	/* tp = a2 * a0 (hart index) + a1 (thread_core_local) */
	mul	a2, a2, a0
	add	tp, a2, a1
	LDR	sp, THREAD_CORE_LOCAL_TMP_STACK_VA_END(tp)
#endif

	/*
	 * Before entering boot_init_primary_runtime(), do two things:
	 * 1. Save the current sp in s2 and set sp to
	 *    threads[0].stack_va_end.
	 * 2. Clear the flag in the current hart's thread_core_local that
	 *    indicates use of the temporary stack.
	 */
	mv	s2, sp
	la	a0, threads
	LDR	a0, 0(a0)
	LDR	a0, THREAD_CTX_STACK_VA_END(a0)
	mv	sp, a0
	jal	thread_get_core_local
	mv	s3, a0
	sw	zero, THREAD_CORE_LOCAL_FLAGS(s3)

	jal	boot_init_primary_runtime
	jal	boot_init_primary_final

	/*
	 * After returning from boot_init_primary_final(), restore the
	 * temporary stack flag and sp.
	 */
	li	a0, THREAD_CLF_TMP
	sw	a0, THREAD_CORE_LOCAL_FLAGS(s3)
	mv	sp, s2

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update the stack canary value */
	addi	sp, sp, -STACK_ALIGNMENT
	mv	a0, sp	/* a0: buffer receiving the new canary */
	li	a1, 1	/* a1: number of canaries to generate */
#ifdef RV32
	li	a2, 4	/* a2: size of one canary in bytes */
#else
	li	a2, 8
#endif
	jal	plat_get_random_stack_canaries
	LDR	s0, 0(sp)
	la	s1, __stack_chk_guard
	STR	s0, 0(s1)
	addi	sp, sp, STACK_ALIGNMENT
#endif

	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

	jal	thread_clr_boot_thread

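	/*
	 * Primary boot is done: report TEEABI_OPTEED_RETURN_ENTRY_DONE to
	 * the untrusted domain and hand over the address of
	 * thread_vector_table.
	 */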
	li	a0, TEEABI_OPTEED_RETURN_ENTRY_DONE
	la	a1, thread_vector_table
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	j	thread_return_to_udomain
END_FUNC reset_primary

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	wait_primary
	csrw	CSR_SATP, zero
#if defined(CFG_DYN_CONFIG)
	/*
	 * Update tp to point to the new thread_core_local.
	 * Update sp to use the new tmp stack.
	 */
	csrr	t0, CSR_XSCRATCH /* t0: hart_index */
	LDR	t1, thread_core_local
	li	t2, THREAD_CORE_LOCAL_SIZE
	/* tp = t2 * t0 (hart index) + t1 (thread_core_local) */
	mul	t2, t2, t0
	add	tp, t2, t1
	sw	a0, THREAD_CORE_LOCAL_HART_ID(tp)
	sw	t0, THREAD_CORE_LOCAL_HART_INDEX(tp)
	LDR	sp, THREAD_CORE_LOCAL_TMP_STACK_VA_END(tp)
#else
	set_sp
	set_tp
#endif
	set_satp
	cpu_is_ready

	jal	boot_init_secondary
#ifdef CFG_RISCV_WITH_M_MODE_SM
	/* Return to the untrusted domain */
	li	a0, TEEABI_OPTEED_RETURN_ON_DONE
	li	a1, 0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	j	thread_return_to_udomain
#endif
	j	.
END_FUNC reset_secondary

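/* Harts with an index >= CFG_TEE_CORE_NB_CORE are parked here by set_sp */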
LOCAL_FUNC unhandled_cpu , :
	wfi
	j	unhandled_cpu
END_FUNC unhandled_cpu

	.section .identity_map.data
	.balign	8
LOCAL_DATA hart_lottery , :
	/* The hart that first increments this variable becomes the primary hart */
	.word	0
END_DATA hart_lottery

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

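/*
 * stack_tmp_rel holds the offset from itself to
 * stack_tmp - STACK_TMP_GUARD, which lets set_sp locate the tmp stack
 * position-independently.
 */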
#if !defined(CFG_DYN_CONFIG)
LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel
#endif

	.balign	8
LOCAL_DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config
428