xref: /optee_os/core/arch/arm/plat-sunxi/main.c (revision c61c04b837302c854a3c9f650118dd7be548302b)
1 /*
2  * Copyright (c) 2014, Allwinner Technology Co., Ltd.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <platform_config.h>
29 
30 #include <stdint.h>
31 #include <string.h>
32 #include <assert.h>
33 
34 #include <sm/sm.h>
35 #include <sm/tee_mon.h>
36 #include <sm/teesmc.h>
37 #include <sm/teesmc_optee.h>
38 
39 #include <arm.h>
40 #include <kernel/thread.h>
41 #include <kernel/time_source.h>
42 #include <kernel/panic.h>
43 #include <kernel/misc.h>
44 #include <mm/tee_pager.h>
45 #include <mm/tee_mmu.h>
46 #include <mm/core_mmu.h>
47 #include <mm/tee_mmu_defs.h>
48 #include <tee/entry.h>
49 #include <tee/arch_svc.h>
50 #include <platform.h>
51 #include <util.h>
52 #include <trace.h>
53 #include <malloc.h>
54 
/* teecore heap address/size is defined in scatter file */
extern unsigned char teecore_heap_start;
extern unsigned char teecore_heap_end;

/*
 * Main MMU L1 table for teecore.
 * Placed in the dedicated ".nozi.mmu.l1" (no-zero-init) section so the
 * linker script can position it with the TTBR-required alignment.
 */
static uint32_t main_mmu_l1_ttb[TEE_MMU_L1_NUM_ENTRIES]
	__attribute__((section(".nozi.mmu.l1"),
	aligned(TEE_MMU_L1_ALIGNMENT)));

/* Main MMU L2 table for teecore, carved up by core_mmu_alloc_l2() below */
static uint32_t main_mmu_l2_ttb[TEE_MMU_L2_NUM_ENTRIES]
	__attribute__((section(".nozi.mmu.l2"),
		       aligned(TEE_MMU_L2_ALIGNMENT)));

/* MMU user-mode L1 tables for TAs, one for each thread (CFG_NUM_THREADS) */
static uint32_t main_mmu_ul1_ttb[CFG_NUM_THREADS][TEE_MMU_UL1_NUM_ENTRIES]
	__attribute__((section(".nozi.mmu.ul1"),
	aligned(TEE_MMU_UL1_ALIGNMENT)));

static void main_fiq(void);
static void main_tee_entry(struct thread_smc_args *args);
static uint32_t main_default_pm_handler(uint32_t a0, uint32_t a1);

/*
 * Entry points handed to the thread subsystem: SMC dispatch, FIQ, SVC,
 * abort, and power-management hooks. All PM events share one handler
 * that panics, as PSCI-style PM is not supported in this configuration.
 */
static const struct thread_handlers handlers = {
	.std_smc = main_tee_entry,
	.fast_smc = main_tee_entry,
	.fiq = main_fiq,
	.svc = tee_svc_handler,
	.abort = tee_pager_abort_handler,
	.cpu_on = main_default_pm_handler,
	.cpu_off = main_default_pm_handler,
	.cpu_suspend = main_default_pm_handler,
	.cpu_resume = main_default_pm_handler,
	.system_off = main_default_pm_handler,
	.system_reset = main_default_pm_handler,
};
91 
void main_init(uint32_t nsec_entry); /* called from assembly only */
/*
 * Per-CPU secure world initialization, called from the assembly boot path
 * on every core.
 *
 * @nsec_entry: physical address in normal world to return to when the
 *              secure monitor drops back to non-secure state.
 *
 * Core 0 additionally performs the one-time global setup: thread
 * subsystem, platform init, heap, TA RAM and teecore init. Does not
 * return a value; panics on teecore init failure.
 */
void main_init(uint32_t nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;
	size_t pos = get_core_pos();

	/*
	 * Mask IRQ and FIQ before switch to the thread vector as the
	 * thread handler requires IRQ and FIQ to be masked while executing
	 * with the temporary stack. The thread subsystem also asserts that
	 * IRQ is blocked when using most of its functions.
	 */
	write_cpsr(read_cpsr() | CPSR_F | CPSR_I);

	if (pos == 0) {
		/* One-time global init, performed only on the primary core */
		thread_init_primary(&handlers);

		/* initialize platform */
		platform_init();
	}

	thread_init_per_cpu();

	/* Initialize secure monitor: where and in what mode to resume NS */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;

	if (pos == 0) {
		unsigned long a, s;
		/* core malloc pool init */
#ifdef CFG_TEE_MALLOC_START
		a = CFG_TEE_MALLOC_START;
		s = CFG_TEE_MALLOC_SIZE;
#else
		/*
		 * Derive the heap from the scatter-file symbols, shrinking
		 * both ends to 64 kB boundaries.
		 * NOTE(review): the start is always rounded UP to the next
		 * 64 kB boundary even when already aligned, wasting up to
		 * 64 kB — presumably intentional padding; confirm against
		 * the scatter file before changing.
		 */
		a = (unsigned long)&teecore_heap_start;
		s = (unsigned long)&teecore_heap_end;
		a = ((a + 1) & ~0x0FFFF) + 0x10000;	/* 64kB aligned */
		s = s & ~0x0FFFF;	/* 64kB aligned */
		s = s - a;	/* s now holds the pool size in bytes */
#endif
		malloc_init((void *)a, s);

		teecore_init_ta_ram();

		if (init_teecore() != TEE_SUCCESS) {
			panic();
		}
	}

	IMSG("optee initialize finished\n");
}
144 
/*
 * FIQ handler registered with the thread subsystem. No secure FIQ
 * sources are expected on this platform, so any FIQ is fatal.
 */
static void main_fiq(void)
{
	panic();
}
149 
150 static uint32_t main_default_pm_handler(uint32_t a0, uint32_t a1)
151 {
152 	/*
153 	 * This function is not supported in this configuration, and
154 	 * should never be called. Panic to catch unintended calls.
155 	 */
156 	(void)&a0;
157 	(void)&a1;
158 	panic();
159 	return 1;
160 }
161 
/*
 * SMC entry point for both standard and fast calls.
 *
 * @args: in/out SMC register block; a0 holds the function ID on entry
 *        and the return code on exit, a1..a3 carry parameters/results.
 *
 * Platform-specific function IDs are handled here first; anything not
 * matched falls through to the generic tee_entry().
 */
static void main_tee_entry(struct thread_smc_args *args)
{
	/* TODO move to main_init() */
	if (init_teecore() != TEE_SUCCESS)
		panic();

	/*
	 * This function first catches platform specific SMC functions
	 * if none matches, the generic tee_entry is called.
	 */
	if (args->a0 == TEESMC32_OPTEE_FASTCALL_GET_SHM_CONFIG) {
		/* Report the non-secure shared memory region to normal world */
		args->a0 = TEESMC_RETURN_OK;
		args->a1 = default_nsec_shm_paddr;
		args->a2 = default_nsec_shm_size;
		/* Should this be TEESMC cache attributes instead? */
		args->a3 = core_mmu_is_shm_cached();
		return;
	}

	if (args->a0 == TEESMC32_OPTEE_FASTCALL_L2CC_MUTEX) {
		switch (args->a1) {
		case TEESMC_OPTEE_L2CC_MUTEX_GET_ADDR:
		case TEESMC_OPTEE_L2CC_MUTEX_SET_ADDR:
		case TEESMC_OPTEE_L2CC_MUTEX_ENABLE:
		case TEESMC_OPTEE_L2CC_MUTEX_DISABLE:
			/* The A80 platform does not support the L2CC mutex */
			args->a0 = TEESMC_RETURN_UNKNOWN_FUNCTION;
			return;
		default:
			args->a0 = TEESMC_RETURN_EBADCMD;
			return;
		}
	}

	/* SiP Service Call Count */
	if (args->a0 == TEESMC32_SIP_SUNXI_CALLS_COUNT) {
		args->a0 = 1;
		return;
	}

	/*  SiP Service Call UID */
	if (args->a0 == TEESMC32_SIP_SUNXI_CALLS_UID) {
		args->a0 = TEESMC_SIP_SUNXI_UID_R0;
		args->a1 = TEESMC_SIP_SUNXI_UID_R1;
		args->a2 = TEESMC_SIP_SUNXI_UID_R2;
		args->a3 = TEESMC_SIP_SUNXI_UID_R3;
		return;
	}

	/* SiP Service Calls: delegated to the platform layer */
	if (args->a0 == TEESMC32_OPTEE_FAST_CALL_SIP_SUNXI) {
		platform_smc_handle(args);
		return;
	}

	tee_entry(args);
}
219 
220 
221 /* Override weak function in tee/entry.c */
222 void tee_entry_get_api_call_count(struct thread_smc_args *args)
223 {
224 	args->a0 = tee_entry_generic_get_api_call_count() + 2;
225 }
226 
227 /* Override weak function in tee/entry.c */
228 void tee_entry_get_api_uuid(struct thread_smc_args *args)
229 {
230 	args->a0 = TEESMC_OPTEE_UID_R0;
231 	args->a1 = TEESMC_OPTEE_UID_R1;
232 	args->a2 = TEESMC_OPTEE_UID_R2;
233 	args->a3 = TEESMC_OPTEE_UID32_R3;
234 }
235 
236 /* Override weak function in tee/entry.c */
237 void tee_entry_get_api_revision(struct thread_smc_args *args)
238 {
239 	args->a0 = TEESMC_OPTEE_REVISION_MAJOR;
240 	args->a1 = TEESMC_OPTEE_REVISION_MINOR;
241 }
242 
243 /* Override weak function in tee/entry.c */
244 void tee_entry_get_os_uuid(struct thread_smc_args *args)
245 {
246 	args->a0 = TEESMC_OS_OPTEE_UUID_R0;
247 	args->a1 = TEESMC_OS_OPTEE_UUID_R1;
248 	args->a2 = TEESMC_OS_OPTEE_UUID_R2;
249 	args->a3 = TEESMC_OS_OPTEE_UUID_R3;
250 }
251 
252 /* Override weak function in tee/entry.c */
253 void tee_entry_get_os_revision(struct thread_smc_args *args)
254 {
255 	args->a0 = TEESMC_OS_OPTEE_REVISION_MAJOR;
256 	args->a1 = TEESMC_OS_OPTEE_REVISION_MINOR;
257 }
258 
259 paddr_t core_mmu_get_main_ttb_pa(void)
260 {
261 	/* Note that this depends on flat mapping of TEE Core */
262 	paddr_t pa = (paddr_t)core_mmu_get_main_ttb_va();
263 
264 	TEE_ASSERT(!(pa & ~TEE_MMU_TTB_L1_MASK));
265 	return pa;
266 }
267 
268 vaddr_t core_mmu_get_main_ttb_va(void)
269 {
270 	return (vaddr_t)main_mmu_l1_ttb;
271 }
272 
273 paddr_t core_mmu_get_ul1_ttb_pa(void)
274 {
275 	/* Note that this depends on flat mapping of TEE Core */
276 	paddr_t pa = (paddr_t)core_mmu_get_ul1_ttb_va();
277 
278 	TEE_ASSERT(!(pa & ~TEE_MMU_TTB_UL1_MASK));
279 	return pa;
280 }
281 
282 vaddr_t core_mmu_get_ul1_ttb_va(void)
283 {
284 	return (vaddr_t)main_mmu_ul1_ttb[thread_get_id()];
285 }
286 
/*
 * Hand out L2 translation table space from the static main_mmu_l2_ttb
 * pool for the mapping region @mm.
 *
 * Returns the table base, or NULL if already allocated or if @mm->size
 * exceeds the virtual range the remaining pool can cover.
 */
void *core_mmu_alloc_l2(struct tee_mmap_region *mm)
{
	/* Can't have this in .bss since it's not initialized yet */
	static size_t l2_offs __attribute__((section(".data")));
	/* Virtual address range covered by one full L2 table */
	const size_t l2_va_size = TEE_MMU_L2_NUM_ENTRIES * SMALL_PAGE_SIZE;
	size_t l2_va_space = ((sizeof(main_mmu_l2_ttb) - l2_offs) /
			     TEE_MMU_L2_SIZE) * l2_va_size;

	/* Only a single allocation is supported: any prior call disables us */
	if (l2_offs)
		return NULL;
	if (mm->size > l2_va_space)
		return NULL;
	/*
	 * NOTE(review): l2_offs is advanced by a table COUNT here, while
	 * l2_va_space above treats it as a BYTE offset. Harmless today
	 * because any non-zero value simply blocks further allocations —
	 * confirm units before ever allowing multiple calls.
	 */
	l2_offs += ROUNDUP(mm->size, l2_va_size) / l2_va_size;
	return main_mmu_l2_ttb;
}
302