1 /* 2 * Copyright (c) 2014, STMicroelectronics International N.V. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright notice, 12 * this list of conditions and the following disclaimer in the documentation 13 * and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * Platform glue for the STM port of OP-TEE core: per-CPU boot-time
 * initialization, dispatch of ST-specific fast SMCs, overrides of the
 * generic tee/entry.c identification calls, MMU translation-table
 * accessors and the console output hooks.
 */
#include <stdint.h>
#include <string.h>
#include <sm/sm.h>
#include <sm/tee_mon.h>
#include <sm/teesmc.h>
#include <sm/teesmc_optee.h>
#include <arm.h>
#include <kernel/thread.h>
#include <kernel/panic.h>
#include <kernel/pm_stubs.h>
#include <malloc.h>
#include <util.h>
#include <trace.h>
#include <kernel/misc.h>
#include <mm/tee_pager.h>
#include <mm/core_mmu.h>
#include <mm/tee_mmu.h>
#include <mm/tee_mmu_defs.h>
#include <tee/entry.h>
#include <tee/arch_svc.h>
#include <console.h>
#include <asc.h>
#include <kernel/tee_l2cc_mutex.h>
#include <assert.h>
#include <platform_config.h>

/* teecore heap address/size is defined in scatter file */
extern unsigned char teecore_heap_start;
extern unsigned char teecore_heap_end;


static void main_fiq(void);
static void main_tee_entry(struct thread_smc_args *args);

/*
 * Handler table registered with the thread subsystem on the primary core.
 * All power-management entry points panic: this platform does not support
 * PSCI-style CPU/system power transitions through secure world.
 */
static const struct thread_handlers handlers = {
	.std_smc = main_tee_entry,
	.fast_smc = main_tee_entry,
	.fiq = main_fiq,
	.svc = tee_svc_handler,
	.abort = tee_pager_abort_handler,
	.cpu_on = pm_panic,
	.cpu_off = pm_panic,
	.cpu_suspend = pm_panic,
	.cpu_resume = pm_panic,
	.system_off = pm_panic,
	.system_reset = pm_panic,
};

/*
 * Per-CPU secure-world entry called from the assembly boot path.
 *
 * nsec_entry: non-secure world entry address; installed as the monitor
 *             return address (mon_lr) so the first monitor exit jumps
 *             there in SVC mode with IRQ masked.
 *
 * On the primary core (get_core_pos() == 0) this additionally registers
 * the SMC/FIQ/abort handler table, initializes the core malloc pool and
 * sets up TA RAM. All cores run the per-CPU thread init and program
 * their secure-monitor return context.
 */
void main_init(uint32_t nsec_entry); /* called from assembly only */
void main_init(uint32_t nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;
	size_t pos = get_core_pos();

	/*
	 * Mask IRQ and FIQ before switch to the thread vector as the
	 * thread handler requires IRQ and FIQ to be masked while executing
	 * with the temporary stack. The thread subsystem also asserts that
	 * IRQ is blocked when using most of its functions.
	 */
	write_cpsr(read_cpsr() | CPSR_F | CPSR_I);

	if (pos == 0)
		thread_init_primary(&handlers);

	thread_init_per_cpu();

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;

	if (pos == 0) {
		unsigned long a, s;
		/* core malloc pool init */
#ifdef CFG_TEE_MALLOC_START
		a = CFG_TEE_MALLOC_START;
		s = CFG_TEE_MALLOC_SIZE;
#else
		a = (unsigned long)&teecore_heap_start;
		s = (unsigned long)&teecore_heap_end;
		/*
		 * NOTE(review): this rounds 'a' up to the NEXT 64kB
		 * boundary even when it is already 64kB aligned, wasting
		 * up to 64kB of heap; ROUNDUP(a, 0x10000) would keep an
		 * already-aligned start. Confirm the extra skip is not
		 * relied upon before changing.
		 */
		a = ((a + 1) & ~0x0FFFF) + 0x10000;	/* 64kB aligned */
		s = s & ~0x0FFFF;	/* 64kB aligned */
		/* 's' held the heap end address; now becomes the size */
		s = s - a;
#endif
		malloc_init((void *)a, s);

		teecore_init_ta_ram();
	}
}

/* FIQ is not expected to reach secure world on this platform: fatal. */
static void main_fiq(void)
{
	panic();
}

/*
 * SMC dispatcher for both standard and fast calls (see 'handlers' above).
 *
 * This function first catches all ST specific SMC functions
 * if none matches, the generic tee_entry is called.
 *
 * args: SMC argument registers; a0 holds the function ID on entry and
 *       the return code on exit, a1..a3 carry call-specific values.
 */
static void main_tee_entry(struct thread_smc_args *args)
{
	int ret;

	/*
	 * TODO move to main_init()
	 * NOTE(review): init_teecore() is invoked on every SMC entry,
	 * not just the first — presumably it is idempotent; confirm.
	 */
	if (init_teecore() != TEE_SUCCESS)
		panic();

	/* Report the non-secure shared memory region to normal world */
	if (args->a0 == TEESMC32_OPTEE_FASTCALL_GET_SHM_CONFIG) {
		args->a0 = TEESMC_RETURN_OK;
		args->a1 = default_nsec_shm_paddr;
		args->a2 = default_nsec_shm_size;
		/* Should this be TEESMC cache attributes instead? */
		args->a3 = core_mmu_is_shm_cached();
		return;
	}

	/* L2 cache controller mutex management, sub-command in a1 */
	if (args->a0 == TEESMC32_OPTEE_FASTCALL_L2CC_MUTEX) {
		switch (args->a1) {
		case TEESMC_OPTEE_L2CC_MUTEX_GET_ADDR:
			ret = tee_l2cc_mutex_configure(
					SERVICEID_GET_L2CC_MUTEX, &args->a2);
			break;
		case TEESMC_OPTEE_L2CC_MUTEX_SET_ADDR:
			ret = tee_l2cc_mutex_configure(
					SERVICEID_SET_L2CC_MUTEX, &args->a2);
			break;
		case TEESMC_OPTEE_L2CC_MUTEX_ENABLE:
			ret = tee_l2cc_mutex_configure(
					SERVICEID_ENABLE_L2CC_MUTEX, NULL);
			break;
		case TEESMC_OPTEE_L2CC_MUTEX_DISABLE:
			ret = tee_l2cc_mutex_configure(
					SERVICEID_DISABLE_L2CC_MUTEX, NULL);
			break;
		default:
			args->a0 = TEESMC_RETURN_EBADCMD;
			return;
		}
		if (ret)
			args->a0 = TEESMC_RETURN_EBADADDR;
		else
			args->a0 = TEESMC_RETURN_OK;
		return;
	}

	/* Not an ST-specific call: hand over to the generic dispatcher */
	tee_entry(args);
}


/*
 * Override weak function in tee/entry.c
 * The +2 accounts for the two ST-specific fast calls handled above
 * (GET_SHM_CONFIG and L2CC_MUTEX).
 */
void tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count() + 2;
}

/* Override weak function in tee/entry.c */
void tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = TEESMC_OPTEE_UID_R0;
	args->a1 = TEESMC_OPTEE_UID_R1;
	args->a2 = TEESMC_OPTEE_UID_R2;
	args->a3 = TEESMC_OPTEE_UID32_R3;
}

/* Override weak function in tee/entry.c */
void tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = TEESMC_OPTEE_REVISION_MAJOR;
	args->a1 = TEESMC_OPTEE_REVISION_MINOR;
}

/* Override weak function in tee/entry.c */
void tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = TEESMC_OS_OPTEE_UUID_R0;
	args->a1 = TEESMC_OS_OPTEE_UUID_R1;
	args->a2 = TEESMC_OS_OPTEE_UUID_R2;
	args->a3 = TEESMC_OS_OPTEE_UUID_R3;
}

/* Override weak function in tee/entry.c */
void tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = TEESMC_OS_OPTEE_REVISION_MAJOR;
	args->a1 = TEESMC_OS_OPTEE_REVISION_MINOR;
}

/*
 * ttbr1 for teecore mapping: 16kB, fixed addr.
 * NOTE(review): these are linker-script symbols declared as pointer
 * variables; the accessors below take their ADDRESS (&SEC_MMU_TTB_FLD)
 * to get the table location — confirm the declaration style matches
 * the scatter file.
 */
extern uint8_t *SEC_MMU_TTB_FLD;
/* ttbr0 for TA mapping (default was 128kB) */
extern uint8_t *SEC_TA_MMU_TTB_FLD;

/* Physical address of the main (teecore) L1 translation table. */
paddr_t core_mmu_get_main_ttb_pa(void)
{
	/* Note that this depends on flat mapping of TEE Core */
	paddr_t pa = (paddr_t)core_mmu_get_main_ttb_va();

	/* TTBR requires the table aligned per TEE_MMU_TTB_L1_MASK */
	TEE_ASSERT(!(pa & ~TEE_MMU_TTB_L1_MASK));
	return pa;
}

/* Virtual address of the main (teecore) L1 translation table. */
vaddr_t core_mmu_get_main_ttb_va(void)
{
	return (vaddr_t)&SEC_MMU_TTB_FLD;
}

/* Physical address of the user (TA) L1 translation table. */
paddr_t core_mmu_get_ul1_ttb_pa(void)
{
	/* Note that this depends on flat mapping of TEE Core */
	paddr_t pa = (paddr_t)core_mmu_get_ul1_ttb_va();
	TEE_ASSERT(!(pa & ~TEE_MMU_TTB_UL1_MASK));
	return pa;
}

/* Virtual address of the user (TA) L1 translation table. */
vaddr_t core_mmu_get_ul1_ttb_va(void)
{
	return (vaddr_t)&SEC_TA_MMU_TTB_FLD;
}

/* Console hook: emit one character on the secure UART. */
void console_putc(int ch)
{
	__asc_xmit_char((char)ch);
}

/* Console hook: drain the secure UART transmit FIFO. */
void console_flush(void)
{
	__asc_flush();
}

/*
 * L2 translation table(s) for teecore mapping: fixed addr.
 * NOTE(review): unlike the TTB symbols above, these pointer VALUES are
 * used directly in arithmetic below (no '&') — confirm the two
 * declaration styles are both intended.
 */
extern uint8_t *SEC_MMU_L2_TTB_FLD;
extern uint8_t *SEC_MMU_L2_TTB_END;

/*
 * Hand out L2 translation table space for the region 'mm', or NULL when
 * no space (or no further allocation) is available.
 *
 * NOTE(review): because any non-zero l2_offs makes this return NULL,
 * only the FIRST call can ever succeed, and the returned address never
 * includes l2_offs. Also, l2_offs is advanced by a table COUNT while
 * l2_size/l2_offs are mixed as if both were byte quantities when
 * computing l2_va_space — harmless today only because of the
 * single-allocation behavior. Confirm this is intentional before reuse.
 */
void *core_mmu_alloc_l2(struct tee_mmap_region *mm)
{
	/* Can't have this in .bss since it's not initialized yet */
	static size_t l2_offs __attribute__((section(".data")));
	const size_t l2_size = SEC_MMU_L2_TTB_END - SEC_MMU_L2_TTB_FLD;
	/* Virtual address span covered by one L2 table */
	const size_t l2_va_size = TEE_MMU_L2_NUM_ENTRIES * SMALL_PAGE_SIZE;
	size_t l2_va_space = ((l2_size - l2_offs) / TEE_MMU_L2_SIZE) *
			l2_va_size;

	if (l2_offs)
		return NULL;
	if (mm->size > l2_va_space)
		return NULL;
	l2_offs += ROUNDUP(mm->size, l2_va_size) / l2_va_size;
	return SEC_MMU_L2_TTB_FLD;
}