xref: /optee_os/core/arch/arm/kernel/entry_a64.S (revision 2570cd0b8d773a181df1ddf8db266aa39a96518e)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015, Linaro Limited
 */

#include <platform_config.h>

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

	/*
	 * Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0.
	 * SP_EL0 is assigned stack_tmp_export + cpu_id * stack_tmp_stride
	 * SP_EL1 is assigned thread_core_local[cpu_id]
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	x0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu
		adr	x1, stack_tmp_stride
		ldr	w1, [x1]
		mul	x1, x0, x1
		adrp	x0, stack_tmp_export
		add	x0, x0, :lo12:stack_tmp_export
		ldr	x0, [x0]
		msr	spsel, #0
		add	sp, x1, x0
		bl	thread_get_core_local
		msr	spsel, #1
		mov	sp, x0
		msr	spsel, #0
	.endm
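
	/*
	 * A rough C sketch of what set_sp computes (illustrative only,
	 * using the symbol names above):
	 *
	 *	size_t pos = __get_core_pos(); // parked if >= CFG_TEE_CORE_NB_CORE
	 *
	 *	sp_el0 = stack_tmp_export + pos * stack_tmp_stride;
	 *	sp_el1 = (vaddr_t)thread_get_core_local();
	 *
	 * SPSel is left at 0 on exit, so SP aliases SP_EL0.
	 */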

	.macro set_sctlr_el1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_I
		orr	x0, x0, #SCTLR_SA
		orr	x0, x0, #SCTLR_SPAN
#if defined(CFG_CORE_RWDATA_NOEXEC)
		orr	x0, x0, #SCTLR_WXN
#endif
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	x0, x0, #SCTLR_A
#else
		bic	x0, x0, #SCTLR_A
#endif
		msr	sctlr_el1, x0
	.endm
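
	/*
	 * For reference, a C sketch of set_sctlr_el1, assuming OP-TEE's
	 * usual read_sctlr_el1()/write_sctlr_el1() accessors and the
	 * SCTLR_* masks from arm.h:
	 *
	 *	uint64_t sctlr = read_sctlr_el1();
	 *
	 *	sctlr |= SCTLR_I | SCTLR_SA | SCTLR_SPAN;
	 *	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC))
	 *		sctlr |= SCTLR_WXN;
	 *	if (IS_ENABLED(CFG_SCTLR_ALIGNMENT_CHECK))
	 *		sctlr |= SCTLR_A;
	 *	else
	 *		sctlr &= ~SCTLR_A;
	 *	write_sctlr_el1(sctlr);
	 */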

FUNC _start , :
	mov	x19, x0		/* Save pageable part address */
#if defined(CFG_DT_ADDR)
	ldr	x20, =CFG_DT_ADDR
#else
	mov	x20, x2		/* Save DT address */
#endif

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, first uint32_t tells the length of the data
	 */
	adr	x0, __init_start	/* dst */
	adr	x1, __data_end		/* src */
	adr	x2, __init_end
	sub	x2, x2, x0		/* init len */
	ldr	w4, [x1, x2]		/* length of hashes etc */
	add	x2, x2, x4		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2		/* __init_start + len */
	add	x1, x1, x2		/* __data_end + len */
	adr	x3, cached_mem_end
	str	x0, [x3]
	adr	x2, __init_start
copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end, first
	 * uint32_t tells the length of the struct + data
	 */
	adr_l	x0, __end		/* dst */
	adr_l	x1, __data_end		/* src */
	ldr	w2, [x1]		/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2
	add	x1, x1, x2
	adr	x3, cached_mem_end
	str	x0, [x3]
	adr_l	x2, __end

copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#endif
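
	/*
	 * Both copy loops above are in effect a 16-bytes-at-a-time
	 * backwards memmove(). A C sketch, with hypothetical dst/src/len
	 * variables and len assumed to be a multiple of 16:
	 *
	 *	uint64_t *d = (uint64_t *)(dst + len);
	 *	uint64_t *s = (uint64_t *)(src + len);
	 *
	 *	while ((vaddr_t)d > dst) {
	 *		*--d = *--s;
	 *		*--d = *--s;
	 *	}
	 *
	 * Copying backwards is what makes the overlap (dst < src) safe.
	 */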

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * the start/end of .bss at least 8-byte aligned.
	 */
	adr_l	x0, __bss_start
	adr_l	x1, __bss_end
clear_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_bss

#ifdef CFG_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker keeping
	 * the start/end of .nex_bss at least 8-byte aligned.
	 */
	adr	x0, __nex_bss_start
	adr	x1, __nex_bss_end
clear_nex_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_nex_bss
#endif
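
	/*
	 * Each of the loops above amounts to a 64-bit-wide memset(0),
	 * relying on the 8-byte alignment noted in the comments:
	 *
	 *	for (uint64_t *p = start; p < end; p++)
	 *		*p = 0;
	 */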

	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

	bl	thread_init_thread_core_local

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	/*
	 * Clean and invalidate the dcache for all memory used during
	 * initialization to avoid nasty surprises when the cache is
	 * turned on. We must not invalidate memory not used by OP-TEE
	 * since we may invalidate entries used by, for instance, ARM
	 * Trusted Firmware.
	 */
	adr_l	x0, __text_start
	ldr	x1, cached_mem_end
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/* Enable Console */
	bl	console_init

#ifdef CFG_CORE_ASLR
	mov	x0, x20
	bl	get_aslr_seed
#else
	mov	x0, #0
#endif

	adr	x1, boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process relocation information again, updating for the new
	 * offset. We're doing this now, before the MMU is enabled, as
	 * some of the memory will become write protected.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	/*
	 * Update the cached_mem_end address with the load offset since
	 * it was calculated before relocation.
	 */
	adr	x5, cached_mem_end
	ldr	x6, [x5]
	add	x6, x6, x0
	str	x6, [x5]
	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

	mov	x0, x19		/* pageable part address */
	mov	x1, #-1
	bl	boot_init_primary_early
#ifndef CFG_VIRTUALIZATION
	mov	x21, sp
	adr_l	x0, threads
	ldr	x0, [x0, #THREAD_CTX_STACK_VA_END]
	mov	sp, x0
#endif
	mov	x0, x20		/* DT address */
	bl	boot_init_primary_late
#ifndef CFG_VIRTUALIZATION
	mov	sp, x21
#endif
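
	/*
	 * The SP juggling around boot_init_primary_late() above is
	 * roughly the following (sketch; the field name comes from the
	 * THREAD_CTX_STACK_VA_END offset in asm-defines):
	 *
	 *	vaddr_t saved_sp = sp;
	 *
	 *	sp = threads[0].stack_va_end;
	 *	boot_init_primary_late(dt_addr);
	 *	sp = saved_sp;
	 */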

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	adr_l	x0, __text_start
	ldr	x1, cached_mem_end
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/*
	 * Clear current thread id now to allow the thread to be reused on
	 * next entry. Matches thread_init_boot_thread() in boot.c.
	 */
#ifndef CFG_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	adr	x0, cpu_on_handler
	/*
	 * Compensate for the load offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	x1, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	sub	x0, x0, x1
	bl	ffa_secondary_cpu_boot_req
	b	thread_ffa_msg_wait
#else
	/*
	 * Pass the address of thread_vector_table.
	 * Compensate for the load offset since the address is used
	 * with the MMU off.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	adr	x1, thread_vector_table
	sub	x1, x1, x0
	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	b	.	/* SMC should not return */
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	.balign	8
LOCAL_DATA cached_mem_end , :
	.skip	8
END_DATA cached_mem_end

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	/* x0 holds load offset */
#ifdef CFG_WITH_PAGER
	adr_l	x6, __init_end
#else
	adr_l	x6, __end
#endif
	ldp	w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]

	mov_imm	x1, TEE_RAM_START
	add	x2, x2, x6	/* start of relocations */
	add	x3, x3, x2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rela64, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py
	 *
	 * All the R_AARCH64_RELATIVE relocations are translated into a
	 * list of 32-bit offsets from TEE_RAM_START. Each such address
	 * holds a 64-bit value which is increased with the load offset.
	 */
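
	/*
	 * In C, the loop below does roughly the following (illustrative
	 * names; reloc_start/reloc_end come from the two 32-bit words
	 * loaded above):
	 *
	 *	for (uint32_t *p = reloc_start; p < reloc_end; p++) {
	 *		uint64_t *va = (uint64_t *)(TEE_RAM_START + *p);
	 *
	 *		*va += load_offset;
	 *	}
	 *
	 * with the pager variant additionally skipping offsets that fall
	 * beyond the init part.
	 */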

#ifdef CFG_WITH_PAGER
	/*
	 * With pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	x6, x6, x1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	w4, [x2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	x4, x6
	b.ge	2f
#endif
	add	x4, x4, x1
	ldr	x5, [x4]
	add	x5, x5, x0
	str	x5, [x4]

2:	cmp	x2, x3
	b.ne	1b

	ret
END_FUNC relocate
#endif

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has
 * been enabled the instruction pointer will be updated to execute at
 * the new offset instead. Stack pointers and the return address are
 * updated.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	adr	x1, boot_mmu_config
	load_xregs x1, 0, 2, 6
	/*
	 * x0 = core_pos
	 * x2 = tcr_el1
	 * x3 = mair_el1
	 * x4 = ttbr0_el1_base
	 * x5 = ttbr0_core_offset
	 * x6 = load_offset
	 */
	msr	tcr_el1, x2
	msr	mair_el1, x3

	/*
	 * ttbr0_el1 = ttbr0_el1_base + ttbr0_core_offset * core_pos
	 */
	madd	x1, x5, x0, x4
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, xzr
	isb

	/* Invalidate TLB */
	tlbi	vmalle1

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	/* Enable the MMU */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_M
	msr	sctlr_el1, x1
	isb

	/* Update vbar */
	mrs	x1, vbar_el1
	add	x1, x1, x6
	msr	vbar_el1, x1
	isb

	/* Invalidate instruction cache and branch predictor */
	ic	iallu
	isb

	/* Enable I and D cache */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_I
	orr	x1, x1, #SCTLR_C
	msr	sctlr_el1, x1
	isb

	/* Adjust stack pointers and return address */
	msr	spsel, #1
	add	sp, sp, x6
	msr	spsel, #0
	add	sp, sp, x6
	add	x30, x30, x6

	ret
END_FUNC enable_mmu

	.balign	8
DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config
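
/*
 * Judging from the load_xregs sequence and register comments in
 * enable_mmu() above, struct core_mmu_config is laid out roughly as
 * below. This is a sketch; the authoritative definition is the C
 * struct behind CORE_MMU_CONFIG_SIZE in the generated asm-defines.
 *
 *	struct core_mmu_config {
 *		uint64_t tcr_el1;
 *		uint64_t mair_el1;
 *		uint64_t ttbr0_el1_base;
 *		uint64_t ttbr0_core_offset;
 *		uint64_t load_offset;
 *	};
 */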

FUNC cpu_on_handler , :
	mov	x19, x0
	mov	x20, x1
	mov	x21, x30

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	bl	__get_core_pos
	bl	enable_mmu

	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

	mov	x0, x19
	mov	x1, x20
#ifdef CFG_CORE_FFA
	bl	boot_cpu_on_handler
	b	thread_ffa_msg_wait
#else
	mov	x30, x21
	b	boot_cpu_on_handler
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry point as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm
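
	/*
	 * The 32-instruction limit follows from the vector layout: the
	 * .align 7 directives below place each entry on a 2^7 = 128-byte
	 * (0x80) boundary, and AArch64 instructions are 4 bytes each, so
	 * 128 / 4 = 32 instructions fit per entry.
	 */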

	.section .identity_map, "ax", %progbits
	.align	11
LOCAL_FUNC reset_vect_table , :, .identity_map
	/* -----------------------------------------------------
	 * Current EL with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
SynchronousExceptionSP0:
	b	SynchronousExceptionSP0
	check_vector_size SynchronousExceptionSP0

	.align	7
IrqSP0:
	b	IrqSP0
	check_vector_size IrqSP0

	.align	7
FiqSP0:
	b	FiqSP0
	check_vector_size FiqSP0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA64:
	b	SynchronousExceptionA64
	check_vector_size SynchronousExceptionA64

	.align	7
IrqA64:
	b	IrqA64
	check_vector_size IrqA64

	.align	7
FiqA64:
	b	FiqA64
	check_vector_size FiqA64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA32:
	b	SynchronousExceptionA32
	check_vector_size SynchronousExceptionA32

	.align	7
IrqA32:
	b	IrqA32
	check_vector_size IrqA32

	.align	7
FiqA32:
	b	FiqA32
	check_vector_size FiqA32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC reset_vect_table
554