xref: /optee_os/core/arch/riscv/kernel/entry.S (revision 29661368f51dc258d8c3f83dc0e53e9e3c9081b4)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2023 Andes Technology Corporation
4 * Copyright 2022-2023 NXP
5 */
6
7#include <asm.S>
8#include <generated/asm-defines.h>
9#include <keep.h>
10#include <kernel/thread.h>
11#include <kernel/thread_private.h>
12#include <kernel/thread_private_arch.h>
13#include <mm/core_mmu.h>
14#include <platform_config.h>
15#include <riscv.h>
16#include <riscv_macros.S>
17#include <tee/optee_abi.h>
18#include <tee/teeabi_opteed.h>
19#include <tee/teeabi_opteed_macros.h>
20
.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
/* Value a hart stores into its sem_cpu_sync[] slot once it has booted */
.equ SEM_CPU_READY, 1
#endif
27
	/*
	 * Setup sp to point to the top of the tmp stack for the current CPU:
	 * sp is assigned:
	 * stack_tmp + (hart_index + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 */
.macro set_sp
	/* Unsupported CPU, park it before it breaks something */
	li	t1, CFG_TEE_CORE_NB_CORE
	csrr	t0, CSR_XSCRATCH /* t0: hart_index */
	bge	t0, t1, unhandled_cpu
	addi	t0, t0, 1	/* t0 = hart_index + 1 */
	lw	t1, stack_tmp_stride
	mul	t1, t0, t1	/* t1 = (hart_index + 1) * stack_tmp_stride */
	/*
	 * stack_tmp_rel holds (stack_tmp - stack_tmp_rel - STACK_TMP_GUARD),
	 * a position-independent offset: adding the runtime address of
	 * stack_tmp_rel yields stack_tmp - STACK_TMP_GUARD regardless of
	 * load address.
	 */
	la	t2, stack_tmp_rel
	lw	t0, 0(t2)
	add	t0, t0, t2
	add	sp, t1, t0
.endm
46
.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	/*
	 * Mark this hart as booted:
	 *   sem_cpu_sync[hart_index] = SEM_CPU_READY
	 * followed by a fence so the store becomes visible to harts
	 * polling the semaphore array.
	 */
	csrr	t0, CSR_XSCRATCH
	la	t1, sem_cpu_sync
	slli	t0, t0, 2	/* one 32-bit word per hart */
	add	t1, t1, t0
	li	t2, SEM_CPU_READY
	sw	t2, 0(t1)
	fence
#endif
.endm
58
.macro set_tp
	/*
	 * Point tp at this hart's struct thread_core_local:
	 *   tp = *thread_core_local + hart_index * THREAD_CORE_LOCAL_SIZE
	 * NOTE(review): relies on s0 still holding the hart ID saved in
	 * _start — confirm at every use site.
	 */
	csrr	t0, CSR_XSCRATCH /* t0: hart_index */
	li	t1, THREAD_CORE_LOCAL_SIZE
	mul	t2, t1, t0
	la	tp, thread_core_local
	LDR	tp, 0(tp)	/* thread_core_local is a pointer variable */
	add	tp, tp, t2
	/* Save hart_id and hart_index into thread_core_local */
	sw	s0, THREAD_CORE_LOCAL_HART_ID(tp)
	sw	t0, THREAD_CORE_LOCAL_HART_INDEX(tp)
.endm
70
.macro set_satp
	/*
	 * a0 = hart_index
	 * a1 = address of boot_mmu_config.satp[0]
	 * a2 = size of CSR SATP
	 *
	 * This hart's SATP is of value (a1 + (a0 * a2)).
	 */
	csrr	a0, CSR_XSCRATCH
	la	a1, boot_mmu_config
	addi	a1, a1, CORE_MMU_CONFIG_SATP
	li	a2, CORE_MMU_CONFIG_SATP_SIZE
	mul	a0, a0, a2
	add	a1, a1, a0
	LDR	a2, 0(a1)
	csrw	CSR_SATP, a2
	/* Invalidate all stale address translations after switching SATP */
	sfence.vma	zero, zero
.endm
89
.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	/*
	 * Secondary harts spin here until the primary hart flags itself
	 * ready in sem_cpu_sync[0] (slot 0 belongs to the primary).
	 */
	la	t0, sem_cpu_sync
	li	t2, SEM_CPU_READY
1:
	fence	w, w
	lw	t1, 0(t0)
	bne	t1, t2, 1b
#endif
.endm
100
.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	/*
	 * Primary hart waits for every secondary hart (indices 1 ..
	 * CFG_TEE_CORE_NB_CORE - 1) to store SEM_CPU_READY into its
	 * sem_cpu_sync[] slot.
	 *
	 * t0 = &sem_cpu_sync[i], t1 = remaining-core counter,
	 * t2 = SEM_CPU_READY, t3 = polled value.
	 *
	 * Fix: the polled value must be loaded into a scratch register
	 * (t3) distinct from the counter. The previous code did
	 * "lw t1, 0(t0)", clobbering the counter: since SEM_CPU_READY is
	 * 1, the counter became 1 after the first ready secondary and the
	 * loop exited early, never waiting for harts 2..N-1 when
	 * CFG_TEE_CORE_NB_CORE > 2. (The AArch64 variant likewise polls
	 * into a separate register.)
	 */
	la	t0, sem_cpu_sync
	li	t1, CFG_TEE_CORE_NB_CORE
	li	t2, SEM_CPU_READY
1:
	addi	t1, t1, -1
	beqz	t1, 3f		/* all secondaries accounted for */
	addi	t0, t0, 4	/* advance to next hart's semaphore */
2:
	fence
	lw	t3, 0(t0)
	bne	t3, t2, 2b	/* spin until this hart is ready */
	j	1b
3:
#endif
.endm
118
#ifdef CFG_BOOT_SYNC_CPU
/*
 * Make the updated sem_cpu_sync[] values visible to other harts.
 * t0/t1 are loaded with the bounds of the semaphore array (kept for a
 * platform cache-flush hook); currently only a full fence is issued.
 *
 * Fix: every continued line of a multi-line C-preprocessor macro needs
 * a trailing backslash (with ';' separating the assembler statements).
 * Without them only the first "la" belonged to the macro and the two
 * following instruction lines were assembled at the definition site.
 */
#define flush_cpu_semaphores \
		la	t0, sem_cpu_sync_start; \
		la	t1, sem_cpu_sync_end; \
		fence
#else
#define flush_cpu_semaphores
#endif
127
FUNC _start , :
	/*
	 * Register usage:
	 * a0	- if non-NULL holds the hart ID
	 * a1	- if non-NULL holds the system DTB address
	 *
	 * s0 - saved a0
	 * s1 - saved a1
	 */
.option push
.option norelax
	/* gp must be set before any gp-relative access; relaxation off */
	la	gp, __global_pointer$
.option pop
#ifdef CFG_RISCV_M_MODE
	/* In M-mode the hart ID comes from the CSR, not from the caller */
	csrr	a0, CSR_MHARTID
#endif
	mv	s0, a0		/* Save hart ID into s0 */

#if defined(CFG_DT_ADDR)
	li	s1, CFG_DT_ADDR
#else
	mv	s1, a1		/* Save device tree address into s1 */
#endif
	/* Only first hart who wins lottery runs the primary boot sequence. */
	la	a3, hart_lottery
	li	a2, 1
	/* Atomic fetch-and-add: each hart gets a unique arrival number */
	amoadd.w a3, a2, (a3)
	/* a3 read from hart_lottery also represents the hart_index */
	csrw	CSR_XSCRATCH, a3
	/* Winner (a3 == 0) boots the core; later arrivals are secondaries */
	bnez	a3, reset_secondary
	jal	reset_primary
	j	.
END_FUNC _start
162
LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)
	/*
	 * Zero bss
	 */
	lla	t0, __bss_start
	lla	t1, __bss_end
	beq	t0, t1, 1f
0:
	STR	zero, (t0)
	add	t0, t0, RISCV_XLEN_BYTES
	bne	t0, t1, 0b
1:
#ifdef CFG_RISCV_S_MODE
	/* Record the physical load address of _start in start_addr */
	lla	t0, _start
	lla	t1, start_addr
	STR	t0, (t1)
#endif

	/* Run with translation disabled (Bare) until set_satp below */
	csrw	CSR_SATP, zero

	/* Setup sp and tp */
#if defined(CFG_DYN_CONFIG)
	/*
	 * Point sp to a temporary stack at the end of mapped core memory.
	 * Point tp to a temporary struct thread_core_local before the temporary
	 * stack.
	 */
	la	t0, __vcore_free_end
	li	t1, THREAD_BOOT_INIT_TMP_ALLOC
	sub	t1, t0, t1

	/* Clear the allocated struct thread_core_local */
	add	t2, t1, THREAD_CORE_LOCAL_SIZE
1:	addi	t2, t2, -RISCV_XLEN_BYTES
	STR	zero, (t2)
	bgt	t2, t1, 1b

	li	t2, THREAD_ID_INVALID
	sh	t2, THREAD_CORE_LOCAL_CURR_THREAD(t1)
	li	t2, THREAD_CLF_TMP
	sw	t2, THREAD_CORE_LOCAL_FLAGS(t1)
	/* Reserve half the canary area above the tmp stack top */
	li	t2, (__STACK_CANARY_SIZE / 2)
	sub	t0, t0, t2
	STR	t0, THREAD_CORE_LOCAL_TMP_STACK_VA_END(t1)
	/* Abort stack ends half-way down the tmp allocation */
	li	t2, (THREAD_BOOT_INIT_TMP_ALLOC / 2)
	sub	t2, t0, t2
	STR	t2, THREAD_CORE_LOCAL_ABT_STACK_VA_END(t1)
	csrr	t2, CSR_XSCRATCH /* t2: hart_index */
	/* a0 is assumed to still hold the hart ID from _start */
	sw	a0, THREAD_CORE_LOCAL_HART_ID(t1)
	sw	t2, THREAD_CORE_LOCAL_HART_INDEX(t1)

	mv	sp, t0
	mv	tp, t1
	/*
	 * Record a single core, to be changed later before secure world
	 * boot is done.
	 */
	la	t2, thread_core_local
	STR	tp, 0(t2)
	la	t2, thread_core_count
	li	t0, 1
	STR	t0, 0(t2)
#else
	set_sp
	set_tp

	/* Initialize thread_core_local[hart_index] for early boot */
	jal	thread_get_abt_stack
	mv	a1, sp
	STR	a1, THREAD_CORE_LOCAL_TMP_STACK_VA_END(tp)
	STR	a0, THREAD_CORE_LOCAL_ABT_STACK_VA_END(tp)
	li	a0, THREAD_ID_INVALID
	sh	a0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
	li	a0, THREAD_CLF_TMP
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
#endif

	jal	plat_primary_init_early
	jal	console_init

	/* Hand [__vcore_free_start, a1) to the boot-time memory allocator */
	la	a0, __vcore_free_start
	la	a1, __vcore_free_end
#ifdef CFG_DYN_CONFIG
	/* Exclude the tmp stack/thread_core_local allocation made above */
	li	a2, THREAD_BOOT_INIT_TMP_ALLOC
	sub	a1, a1, a2
#endif
	la	a2, __vcore_free_end
	jal	boot_mem_init

	mv	a0, x0
	la	a1, boot_mmu_config
	jal	core_init_mmu_map

	/* Enable the MMU using the configuration just created */
	set_satp

#ifdef CFG_CORE_ASLR
#if defined(CFG_DYN_CONFIG)
	/*
	 * thread_core_local holds only one core and thread_core_count is 1
	 * so tp points to the updated pointer for thread_core_local.
	 */
	la	t0, thread_core_local
	STR	tp, 0(t0)
#endif
#endif

	jal	boot_init_primary_early

	mv	a0, s1		/* s1 contains saved device tree address */
	mv	a1, x0		/* unused */
	jal	boot_init_primary_late

#if defined(CFG_DYN_CONFIG)
	/* Get hart index */
	jal	__get_core_pos

	/*
	 * Switch to the new thread_core_local and thread_core_count and
	 * keep the pointer to the new thread_core_local in a1.
	 */
	LDR	a1, __thread_core_count_new
	la	a2, thread_core_count
	STR	a1, 0(a2)
	LDR	a1, __thread_core_local_new
	la	a2, thread_core_local
	STR	a1, 0(a2)

	/*
	 * Update tp to point the new thread_core_local.
	 * Update sp to use the new tmp stack.
	 */
	li	a2, THREAD_CORE_LOCAL_SIZE
	/* tp = a2 * a0(hart index) + a1(thread_core_local) */
	mul	a2, a2, a0
	add	tp, a2, a1
	LDR	sp, THREAD_CORE_LOCAL_TMP_STACK_VA_END(tp)
#endif

	/*
	 * Before entering boot_init_primary_runtime(), we do these two steps:
	 * 1. Save current sp to s2, and set sp as threads[0].stack_va_end
	 * 2. Clear the flag which indicates usage of the temporary stack in the
	 *    current hart's thread_core_local structure.
	 */
	mv	s2, sp
	la	a0, threads
	LDR	a0, 0(a0)
	LDR	a0, THREAD_CTX_STACK_VA_END(a0)
	mv	sp, a0
	jal	thread_get_core_local
	mv	s3, a0
	sw	zero, THREAD_CORE_LOCAL_FLAGS(s3)

	jal	boot_init_primary_runtime
	jal	boot_init_primary_final

	/*
	 * After returning from boot_init_primary_final(), the tmp-stack
	 * flag and sp are restored.
	 */
	li	a0, THREAD_CLF_TMP
	sw	a0, THREAD_CORE_LOCAL_FLAGS(s3)
	mv	sp, s2

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
	addi	sp, sp, -STACK_ALIGNMENT
	mv	a0, sp
	li	a1, 1		/* request one canary */
#ifdef RV32
	li	a2, 4		/* canary size in bytes (xlen/8) */
#else
	li	a2, 8
#endif
	jal	plat_get_random_stack_canaries
	LDR	s0, 0(sp)
	la	s1, __stack_chk_guard
	STR	s0, 0(s1)
	addi	sp, sp, STACK_ALIGNMENT
#endif

	/* Publish "primary ready", then wait for all secondary harts */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

	jal	thread_clr_boot_thread

	/* Report boot completion and enter the untrusted domain */
	li	a0, TEEABI_OPTEED_RETURN_ENTRY_DONE
	la	a1, thread_vector_table
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	j	thread_return_to_udomain
END_FUNC reset_primary
359
LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	/* Block until the primary hart has finished global boot init */
	wait_primary
	/* Run with translation disabled (Bare) until set_satp below */
	csrw	CSR_SATP, zero
#if defined(CFG_DYN_CONFIG)
	/*
	 * Update tp to point the new thread_core_local.
	 * Update sp to use the new tmp stack.
	 */
	csrr	t0, CSR_XSCRATCH /* t0: hart_index */
	LDR	t1, thread_core_local
	li	t2, THREAD_CORE_LOCAL_SIZE
	/* tp = t2 * t0(hart index) + t1(thread_core_local) */
	mul	t2, t2, t0
	add	tp, t2, t1
	/* a0 is assumed to still hold this hart's ID from _start */
	sw	a0, THREAD_CORE_LOCAL_HART_ID(tp)
	sw	t0, THREAD_CORE_LOCAL_HART_INDEX(tp)
	LDR	sp, THREAD_CORE_LOCAL_TMP_STACK_VA_END(tp)
#else
	set_sp
	set_tp
#endif
	set_satp
	/* Flag this hart as booted so the primary can proceed */
	cpu_is_ready

	jal	boot_init_secondary
#ifdef CFG_RISCV_WITH_M_MODE_SM
	/* Return to untrusted domain */
	li	a0, TEEABI_OPTEED_RETURN_ON_DONE
	li	a1, 0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	j	thread_return_to_udomain
#endif
	j	.
END_FUNC reset_secondary
398
LOCAL_FUNC unhandled_cpu , :
	/*
	 * Park an unsupported hart forever: wait-for-interrupt in a tight
	 * loop so it consumes minimal power and never runs core code.
	 */
1:
	wfi
	j	1b
END_FUNC unhandled_cpu
403
	.section .identity_map.data
	.balign	8
LOCAL_DATA hart_lottery , :
	/* The hart who first increments this variable will be primary hart. */
	/* Incremented atomically in _start; the old value is the hart_index */
	.word	0
END_DATA hart_lottery
410
#ifdef CFG_BOOT_SYNC_CPU
/*
 * Bounds of the per-hart boot semaphore array (one 32-bit word per hart),
 * consumed by flush_cpu_semaphores.
 * NOTE(review): .word is 32-bit — assumes these link addresses fit in
 * 32 bits; confirm for RV64 configurations.
 */
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif
420
#if !defined(CFG_DYN_CONFIG)
LOCAL_DATA stack_tmp_rel , :
	/*
	 * Position-independent offset from stack_tmp_rel to the guarded
	 * top of stack_tmp; the set_sp macro adds &stack_tmp_rel back to
	 * recover the absolute stack top at the actual load address.
	 */
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel
#endif
426
	.section .identity_map.data
	.balign	8
DATA boot_mmu_config , : /* struct core_mmu_config */
	/* Filled in by core_init_mmu_map(), consumed by the set_satp macro */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config
432