xref: /optee_os/core/arch/riscv/kernel/entry.S (revision ca71b6fa3fb3feb0282b04f91b27eab518118ac8)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2023 Andes Technology Corporation
4 * Copyright 2022-2023 NXP
5 */
6
7#include <asm.S>
8#include <generated/asm-defines.h>
9#include <keep.h>
10#include <kernel/thread.h>
11#include <kernel/riscv_elf.h>
12#include <kernel/thread_private.h>
13#include <kernel/thread_private_arch.h>
14#include <mm/core_mmu.h>
15#include <platform_config.h>
16#include <riscv.h>
17#include <riscv_macros.S>
18#include <tee/optee_abi.h>
19#include <tee/teeabi_opteed.h>
20#include <tee/teeabi_opteed_macros.h>
21
.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
/* Value stored in a hart's sem_cpu_sync[] slot once that hart is booted */
.equ SEM_CPU_READY, 1
#endif
28
	/*
	 * set_sp - point sp at the top of this hart's temporary stack:
	 *   sp = stack_tmp + (hart_index + 1) * stack_tmp_stride -
	 *        STACK_TMP_GUARD
	 * Parks the hart in unhandled_cpu if hart_index is out of range.
	 * Expects CSR_XSCRATCH to hold hart_index. Clobbers t0, t1, t2.
	 */
.macro set_sp
	/* Unsupported CPU, park it before it breaks something */
	li	t1, CFG_TEE_CORE_NB_CORE
	csrr	t0, CSR_XSCRATCH /* t0: hart_index */
	bge	t0, t1, unhandled_cpu
	addi	t0, t0, 1
	lw	t1, stack_tmp_stride
	mul	t1, t0, t1	/* t1 = (hart_index + 1) * stack_tmp_stride */
	/*
	 * stack_tmp_rel holds (stack_tmp - stack_tmp_rel - STACK_TMP_GUARD),
	 * so adding the address of stack_tmp_rel itself yields
	 * stack_tmp - STACK_TMP_GUARD in a position-independent way.
	 */
	la	t2, stack_tmp_rel
	lw	t0, 0(t2)
	add	t0, t0, t2
	add	sp, t1, t0
.endm
47
/*
 * cpu_is_ready - mark this hart booted by storing SEM_CPU_READY into
 * sem_cpu_sync[hart_index] (32-bit slots), then fence so the store is
 * observable by harts polling in wait_primary/wait_secondary.
 * Expects CSR_XSCRATCH to hold hart_index. Clobbers t0, t1, t2.
 */
.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	csrr	t0, CSR_XSCRATCH	/* t0: hart_index */
	la	t1, sem_cpu_sync
	slli	t0, t0, 2		/* each slot is a 32-bit word */
	add	t1, t1, t0
	li	t2, SEM_CPU_READY
	sw	t2, 0(t1)
	fence				/* publish the store to other harts */
#endif
.endm
59
/*
 * set_tp - point tp at this hart's struct thread_core_local, i.e.
 * thread_core_local[hart_index], and record hart ID and hart index there.
 * Expects CSR_XSCRATCH = hart_index and s0 = hart ID.
 * Clobbers t0, t1, t2.
 */
.macro set_tp
	csrr	t0, CSR_XSCRATCH /* t0: hart_index */
	li	t1, THREAD_CORE_LOCAL_SIZE
	mul	t2, t1, t0	/* t2 = byte offset of this hart's entry */
	la	tp, thread_core_local
	LDR	tp, 0(tp)	/* thread_core_local is a pointer, dereference */
	add	tp, tp, t2
	/* Save hart_id and hart_index into thread_core_local */
	sw	s0, THREAD_CORE_LOCAL_HART_ID(tp)
	sw	t0, THREAD_CORE_LOCAL_HART_INDEX(tp)
.endm
71
/*
 * set_satp - load this hart's pre-computed SATP value from
 * boot_mmu_config.satp[hart_index], program CSR_SATP and flush the TLB.
 * Expects CSR_XSCRATCH = hart_index. Clobbers a0, a1, a2.
 */
.macro set_satp
	/*
	 * a0 = hart_index
	 * a1 = address of boot_mmu_config.satp[0]
	 * a2 = size of CSR SATP
	 *
	 * This hart's SATP is of value (a1 + (a0 * a2)).
	 */
	csrr	a0, CSR_XSCRATCH
	la	a1, boot_mmu_config
	addi	a1, a1, CORE_MMU_CONFIG_SATP
	li	a2, CORE_MMU_CONFIG_SATP_SIZE
	mul	a0, a0, a2
	add	a1, a1, a0
	LDR	a2, 0(a1)
	csrw	CSR_SATP, a2
	sfence.vma	zero, zero	/* flush all TLB entries, all ASIDs */
.endm
90
/*
 * wait_primary - secondary harts spin here until the primary hart
 * (hart_index 0) stores SEM_CPU_READY into sem_cpu_sync[0] via
 * cpu_is_ready. Clobbers t0, t1, t2.
 */
.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	la	t0, sem_cpu_sync
	li	t2, SEM_CPU_READY
1:
	fence	w, w	/* order observation of the primary's store */
	lw	t1, 0(t0)
	bne	t1, t2, 1b
#endif
.endm
101
/*
 * wait_secondary - the primary hart spins here until every secondary
 * hart has stored SEM_CPU_READY into its sem_cpu_sync[] slot.
 * Slots 1..CFG_TEE_CORE_NB_CORE-1 are polled in turn (slot 0 belongs to
 * the primary itself). Clobbers t0, t1, t2, t3.
 *
 * Fix: the inner poll loop previously loaded into t1, destroying the
 * remaining-core counter kept in t1; after the first secondary became
 * ready the counter held SEM_CPU_READY (1) and the outer loop exited,
 * so with more than two cores the later secondaries were never waited
 * for. The polled value now goes into t3.
 */
.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	la	t0, sem_cpu_sync
	li	t1, CFG_TEE_CORE_NB_CORE
	li	t2, SEM_CPU_READY
1:
	addi	t1, t1, -1
	beqz	t1, 3f		/* all secondary slots checked, done */
	addi	t0, t0, 4	/* advance to next hart's 32-bit slot */
2:
	fence
	lw	t3, 0(t0)
	bne	t3, t2, 2b
	j	1b
3:
#endif
.endm
119
#ifdef CFG_BOOT_SYNC_CPU
/*
 * flush_cpu_semaphores - make the sem_cpu_sync[] updates visible to the
 * secondary harts. t0/t1 receive the array bounds; the fence orders the
 * preceding stores. Clobbers t0, t1.
 *
 * Fix: the multi-line #define lacked "; \" continuations, so only the
 * first "la" was part of the macro and the remaining two instructions
 * were assembled at the definition site instead of at each use.
 */
#define flush_cpu_semaphores \
		la	t0, sem_cpu_sync_start; \
		la	t1, sem_cpu_sync_end; \
		fence
#else
#define flush_cpu_semaphores
#endif
128
/*
 * _start - common boot entry for all harts.
 * Saves the boot arguments, draws a ticket from hart_lottery to elect
 * the primary hart (ticket 0) and to assign each hart its hart_index,
 * then branches to the primary or secondary reset path.
 */
FUNC _start , :
	/*
	 * Register usage:
	 * a0	- if non-NULL holds the hart ID
	 * a1	- if non-NULL holds the system DTB address
	 *
	 * s0 - saved a0
	 * s1 - saved a1
	 */
.option push
.option norelax
	/* gp must be set with relaxation disabled or it would be a no-op */
	la	gp, __global_pointer$
.option pop
#ifdef CFG_RISCV_M_MODE
	/* In M-mode the hart ID comes from the CSR, not from a0 */
	csrr	a0, CSR_MHARTID
#endif
	mv	s0, a0		/* Save hart ID into s0 */

#if defined(CFG_DT_ADDR)
	li	s1, CFG_DT_ADDR
#else
	mv	s1, a1		/* Save device tree address into s1 */
#endif
	/* Only first hart who wins lottery runs the primary boot sequence. */
	la	a3, hart_lottery
	li	a2, 1
	amoadd.w a3, a2, (a3)	/* a3 = previous counter value (our ticket) */
	/* a3 read from hart_lottery also represents the hart_index */
	csrw	CSR_XSCRATCH, a3	/* stash hart_index for later macros */

	bnez	a3, reset_secondary	/* non-zero ticket: secondary hart */
	jal	reset_primary
	j	.			/* reset_primary should not return */
END_FUNC _start
163
/*
 * reset_primary - boot sequence for the primary (lottery-winning) hart:
 * clear .bss, set up temporary stack/thread_core_local, init console and
 * boot memory, build and enable the MMU mapping (with optional ASLR
 * relocation), run the C boot stages, then sync with the secondary harts
 * and return to the untrusted domain via the vector table.
 * Runs from the .identity_map section so it stays valid across the
 * SATP switch.
 */
LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)
#ifdef CFG_CORE_ASLR
	/* First relocation pass with offset 0 (pre-MMU fixup) */
	li	a0, 0
	jal	relocate
#endif
	/*
	 * Zero bss
	 */
	lla	t0, __bss_start
	lla	t1, __bss_end
	beq	t0, t1, 1f
0:
	STR	zero, (t0)
	add	t0, t0, RISCV_XLEN_BYTES
	bne	t0, t1, 0b
1:
#ifdef CFG_RISCV_S_MODE
	/* Record the physical load address of _start */
	lla	t0, _start
	lla	t1, start_addr
	STR	t0, (t1)
#endif

	/* Address translation off until set_satp below */
	csrw	CSR_SATP, zero

	/* Setup sp and tp */
#if defined(CFG_DYN_CONFIG)
	/*
	 * Point sp to a temporary stack at the end of mapped core memory.
	 * Point tp to a temporary struct thread_core_local before the temporary
	 * stack.
	 */
	la	t0, __vcore_free_end
	li	t1, THREAD_BOOT_INIT_TMP_ALLOC
	sub	t1, t0, t1

	/* Clear the allocated struct thread_core_local */
	add	t2, t1, THREAD_CORE_LOCAL_SIZE
1:	addi	t2, t2, -RISCV_XLEN_BYTES
	STR	zero, (t2)
	bgt	t2, t1, 1b

	li	t2, THREAD_ID_INVALID
	sh	t2, THREAD_CORE_LOCAL_CURR_THREAD(t1)
	li	t2, THREAD_CLF_TMP
	sw	t2, THREAD_CORE_LOCAL_FLAGS(t1)
	/* Leave room for half the stack canary guard above the tmp stack */
	li	t2, (__STACK_CANARY_SIZE / 2)
	sub	t0, t0, t2
	STR	t0, THREAD_CORE_LOCAL_TMP_STACK_VA_END(t1)
	/* Abort stack occupies the lower half of the tmp allocation */
	li	t2, (THREAD_BOOT_INIT_TMP_ALLOC / 2)
	sub	t2, t0, t2
	STR	t2, THREAD_CORE_LOCAL_ABT_STACK_VA_END(t1)
	csrr	t2, CSR_XSCRATCH /* t2: hart_index */
	sw	a0, THREAD_CORE_LOCAL_HART_ID(t1)
	sw	t2, THREAD_CORE_LOCAL_HART_INDEX(t1)

	mv	sp, t0
	mv	tp, t1
	/*
	 * Record a single core, to be changed later before secure world
	 * boot is done.
	 */
	la	t2, thread_core_local
	STR	tp, 0(t2)
	la	t2, thread_core_count
	li	t0, 1
	STR	t0, 0(t2)
#else
	set_sp
	set_tp

	/* Initialize thread_core_local[hart_index] for early boot */
	jal	thread_get_abt_stack
	mv	a1, sp
	STR	a1, THREAD_CORE_LOCAL_TMP_STACK_VA_END(tp)
	STR	a0, THREAD_CORE_LOCAL_ABT_STACK_VA_END(tp)
	li	a0, THREAD_ID_INVALID
	sh	a0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
	li	a0, THREAD_CLF_TMP
	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
#endif

	jal	plat_primary_init_early
	jal	console_init

	/* boot_mem_init(start, end, orig_end) over the free core memory */
	la	a0, __vcore_free_start
	la	a1, __vcore_free_end
#ifdef CFG_DYN_CONFIG
	/* Exclude the temporary stack/thread_core_local allocated above */
	li	a2, THREAD_BOOT_INIT_TMP_ALLOC
	sub	a1, a1, a2
#endif
	la	a2, __vcore_free_end
	jal	boot_mem_init

	mv	a0, x0
	la	a1, boot_mmu_config
	jal	core_init_mmu_map

	set_satp
#ifdef CFG_CORE_ASLR
	/* Second relocation pass with the actual ASLR map offset */
	la	a0, boot_mmu_config
	LDR	a0, CORE_MMU_CONFIG_MAP_OFFSET(a0)
	beqz	a0, 1f		/* no offset, skip dynamic relocation */
	jal	relocate
1:
#endif

#ifdef CFG_CORE_ASLR
#if defined(CFG_DYN_CONFIG)
	/*
	 * thread_core_local holds only one core and thread_core_count is 1
	 * so tp points to the updated pointer for thread_core_local.
	 */
	la	t0, thread_core_local
	STR	tp, 0(t0)
#endif
#endif

	jal	boot_init_primary_early

	mv	a0, s1		/* s1 contains saved device tree address */
	mv	a1, x0		/* unused */
	jal	boot_init_primary_late

#if defined(CFG_DYN_CONFIG)
	/* Get hart index */
	jal	__get_core_pos

	/*
	 * Switch to the new thread_core_local and thread_core_count and
	 * keep the pointer to the new thread_core_local in a1.
	 */
	LDR	a1, __thread_core_count_new
	la	a2, thread_core_count
	STR	a1, 0(a2)
	LDR	a1, __thread_core_local_new
	la	a2, thread_core_local
	STR	a1, 0(a2)

	/*
	 * Update tp to point the new thread_core_local.
	 * Update sp to use the new tmp stack.
	 */
	li	a2, THREAD_CORE_LOCAL_SIZE
	/* tp = a2 * a0(hart index) + a1(thread_core_local) */
	mul	a2, a2, a0
	add	tp, a2, a1
	LDR	sp, THREAD_CORE_LOCAL_TMP_STACK_VA_END(tp)
#endif

	/*
	 * Before entering boot_init_primary_runtime(), we do these two steps:
	 * 1. Save current sp to s2, and set sp as threads[0].stack_va_end
	 * 2. Clear the flag which indicates usage of the temporary stack in the
	 *    current hart's thread_core_local structure.
	 */
	mv	s2, sp
	la	a0, threads
	LDR	a0, 0(a0)	/* threads is a pointer, dereference */
	LDR	a0, THREAD_CTX_STACK_VA_END(a0)
	mv	sp, a0
	jal	thread_get_core_local
	mv	s3, a0		/* s3: this hart's thread_core_local */
	sw	zero, THREAD_CORE_LOCAL_FLAGS(s3)

	jal	boot_init_primary_runtime
	jal	boot_init_primary_final

	/*
	 * After returning from boot_init_primary_final(), the temporary
	 * stack flag and sp are restored.
	 */
	li	a0, THREAD_CLF_TMP
	sw	a0, THREAD_CORE_LOCAL_FLAGS(s3)
	mv	sp, s2

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
	addi	sp, sp, -STACK_ALIGNMENT	/* scratch buffer for canary */
	mv	a0, sp				/* a0: output buffer */
	li	a1, 1				/* a1: one canary requested */
#ifdef RV32
	li	a2, 4				/* a2: canary size in bytes */
#else
	li	a2, 8
#endif
	jal	plat_get_random_stack_canaries
	LDR	s0, 0(sp)
	la	s1, __stack_chk_guard
	STR	s0, 0(s1)
	addi	sp, sp, STACK_ALIGNMENT
#endif

	/* Signal readiness and wait for all secondary harts */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

	jal	thread_clr_boot_thread

	/* Report boot completion and the vector table to the untrusted domain */
	li	a0, TEEABI_OPTEED_RETURN_ENTRY_DONE
	la	a1, thread_vector_table
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	j	thread_return_to_udomain
END_FUNC reset_primary
371
/*
 * reset_secondary - boot sequence for every hart that lost the lottery:
 * wait until the primary hart has finished its boot, set up this hart's
 * stack/tp, enable the MMU, mark the hart ready and enter the secondary
 * C boot path. Runs from .identity_map to survive the SATP switch.
 * On entry: a0/s0 = hart ID, CSR_XSCRATCH = hart_index.
 */
LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	wait_primary
	/* Address translation off until set_satp below */
	csrw	CSR_SATP, zero
#if defined(CFG_DYN_CONFIG)
	/*
	 * Update tp to point the new thread_core_local.
	 * Update sp to use the new tmp stack.
	 */
	csrr	t0, CSR_XSCRATCH /* t0: hart_index */
	LDR	t1, thread_core_local
	li	t2, THREAD_CORE_LOCAL_SIZE
	/* tp = t2 * t0(hart index) + t1(thread_core_local) */
	mul	t2, t2, t0
	add	tp, t2, t1
	sw	a0, THREAD_CORE_LOCAL_HART_ID(tp)
	sw	t0, THREAD_CORE_LOCAL_HART_INDEX(tp)
	LDR	sp, THREAD_CORE_LOCAL_TMP_STACK_VA_END(tp)
#else
	set_sp
	set_tp
#endif
	set_satp
	cpu_is_ready

	jal	boot_init_secondary
#ifdef CFG_RISCV_WITH_M_MODE_SM
	/* Return to untrusted domain */
	li	a0, TEEABI_OPTEED_RETURN_ON_DONE
	li	a1, 0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	j	thread_return_to_udomain
#endif
	j	.
END_FUNC reset_secondary
410
/*
 * unhandled_cpu - parking loop for harts with an out-of-range hart
 * index: sleep via wfi and loop forever. Never returns.
 */
LOCAL_FUNC unhandled_cpu , :
1:	wfi
	j	1b
END_FUNC unhandled_cpu
415
#if defined(CFG_CORE_ASLR)
/*
 * void relocate(unsigned long offset);
 *
 * This function updates dynamic relocations.
 *
 * Walks the Rela entries in [__rel_dyn_start, __rel_dyn_end); each entry
 * is three XLEN-sized words: r_offset, r_info, r_addend. Handles
 * R_RISCV_RELATIVE directly and symbol-based RELOC_TYPE entries via the
 * dynamic symbol table, adding the ASLR offset in a0 to each target.
 * Clobbers t0-t6.
 */
LOCAL_FUNC relocate , :
	/*
	 * a0 holds relocate offset
	 */
	la	t0, __rel_dyn_start
	la	t1, __rel_dyn_end
	beq	t0, t1, 5f	/* no relocations at all */
2:
	LDR	t5, RISCV_XLEN_BYTES(t0)        /* t5: relocation info:type */
	li	t3, R_RISCV_RELATIVE
	bne	t5, t3, 3f
	/* R_RISCV_RELATIVE: *r_offset = r_addend + ASLR offset */
	LDR	t3, 0(t0)                       /* t3: offset */
	LDR	t5, (RISCV_XLEN_BYTES * 2)(t0)  /* t5: addend */
	add	t5, t5, a0                      /* t5: add ASLR offset */
	STR	t5, 0(t3)                       /* update address */
	j	4f

3:
	/* Symbol-based entry: r_info = (sym_index << SYM_INDEX) | type */
	la	t4, __dyn_sym_start
	srli	t6, t5, SYM_INDEX             /* t6: sym table index */
	andi	t5, t5, 0xFF                  /* t5: relocation type */
	li	t3, RELOC_TYPE
	bne	t5, t3, 4f	/* unknown type: skip entry */

	/* address R_RISCV_64 or R_RISCV_32 cases */
	/* *r_offset = sym value + r_addend + ASLR offset */
	LDR	t3, 0(t0)
	li	t5, SYM_SIZE
	mul	t6, t6, t5
	add	t5, t4, t6	/* t5: address of the symbol table entry */
	LDR	t6, (RISCV_XLEN_BYTES * 2)(t0)  /* t6: addend */
	LDR	t5, RISCV_XLEN_BYTES(t5)        /* t5: sym value */
	add	t5, t5, t6
	add	t5, t5, a0                      /* t5: add ASLR offset */
	STR	t5, 0(t3)                       /* update address */

4:
	/* Advance to the next 3-word Rela entry */
	addi	t0, t0, (RISCV_XLEN_BYTES * 3)
	blt	t0, t1, 2b
5:
	ret
END_FUNC relocate
#endif
464
	.section .identity_map.data
	.balign	8
/*
 * Atomically incremented ticket counter in _start: the hart reading 0
 * becomes the primary; the value read also serves as the hart_index.
 */
LOCAL_DATA hart_lottery , :
	/* The hart who first increments this variable will be primary hart. */
	.word	0
END_DATA hart_lottery
471
#ifdef CFG_BOOT_SYNC_CPU
/* Bounds of the sem_cpu_sync[] array (one 32-bit slot per core), used by
 * flush_cpu_semaphores. */
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif
481
#if !defined(CFG_DYN_CONFIG)
/*
 * Link-time offset from this word to (stack_tmp - STACK_TMP_GUARD);
 * set_sp adds &stack_tmp_rel back to obtain the stack top without an
 * absolute relocation (position-independent before ASLR fixup).
 */
LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel
#endif
487
	.section .identity_map.data
	.balign	8
/* Filled by core_init_mmu_map(); holds per-hart SATP values and the
 * ASLR map offset consumed by set_satp and the relocation pass. */
DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config
493