/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <context_mgmt.h>
#include <debug.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
#include <string.h>

#include "smcall.h"
#include "sm_err.h"

struct trusty_stack {
        uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
};

struct trusty_cpu_ctx {
        cpu_context_t cpu_ctx;
        void *saved_sp;
        uint32_t saved_security_state;
        int fiq_handler_active;
        uint64_t fiq_handler_pc;
        uint64_t fiq_handler_cpsr;
        uint64_t fiq_handler_sp;
        uint64_t fiq_pc;
        uint64_t fiq_cpsr;
        uint64_t fiq_sp_el1;
        gp_regs_t fiq_gpregs;
        struct trusty_stack secure_stack;
};

struct args {
        uint64_t r0;
        uint64_t r1;
        uint64_t r2;
        uint64_t r3;
};

struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];

struct args trusty_init_context_stack(void **sp, void *new_stack);
struct args trusty_context_switch_helper(void **sp, uint64_t r0, uint64_t r1,
                                         uint64_t r2, uint64_t r3);

static struct trusty_cpu_ctx *get_trusty_ctx(void)
{
        return &trusty_cpu_ctx[plat_my_core_pos()];
}

/*
 * Save the calling world's EL1 system register state, switch to the other
 * world via the assembly helper and restore the calling world's state when
 * the other world returns.
 */
static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
                                         uint64_t r1, uint64_t r2, uint64_t r3)
{
        struct args ret;
        struct trusty_cpu_ctx *ctx = get_trusty_ctx();

        assert(ctx->saved_security_state != security_state);

        cm_el1_sysregs_context_save(security_state);

        ctx->saved_security_state = security_state;
        ret = trusty_context_switch_helper(&ctx->saved_sp, r0, r1, r2, r3);

        assert(ctx->saved_security_state == !security_state);

        cm_el1_sysregs_context_restore(security_state);
        cm_set_next_eret_context(security_state);

        return ret;
}

/*
 * S-EL1 interrupt handler: notify Trusty of the FIQ, save the interrupted
 * non-secure context and redirect the non-secure return to the registered
 * FIQ handler entry point.
 */
static uint64_t trusty_fiq_handler(uint32_t id,
                                   uint32_t flags,
                                   void *handle,
                                   void *cookie)
{
        struct args ret;
        struct trusty_cpu_ctx *ctx = get_trusty_ctx();

        assert(!is_caller_secure(flags));

        ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
        if (ret.r0) {
                SMC_RET0(handle);
        }

        if (ctx->fiq_handler_active) {
                INFO("%s: fiq handler already active\n", __func__);
                SMC_RET0(handle);
        }

        ctx->fiq_handler_active = 1;
        memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
        ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
        ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
        ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);

        write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
        cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, ctx->fiq_handler_cpsr);

        SMC_RET0(handle);
}

static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
                                       uint64_t handler, uint64_t stack)
{
        struct trusty_cpu_ctx *ctx;

        if (cpu >= PLATFORM_CORE_COUNT) {
                ERROR("%s: cpu %ld >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
                return SM_ERR_INVALID_PARAMETERS;
        }

        ctx = &trusty_cpu_ctx[cpu];
        ctx->fiq_handler_pc = handler;
        ctx->fiq_handler_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
        ctx->fiq_handler_sp = stack;

        SMC_RET1(handle, 0);
}

static uint64_t trusty_get_fiq_regs(void *handle)
{
        struct trusty_cpu_ctx *ctx = get_trusty_ctx();
        uint64_t sp_el0 = read_ctx_reg(&ctx->fiq_gpregs, CTX_GPREG_SP_EL0);

        SMC_RET4(handle, ctx->fiq_pc, ctx->fiq_cpsr, sp_el0, ctx->fiq_sp_el1);
}

static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
{
        struct args ret;
        struct trusty_cpu_ctx *ctx = get_trusty_ctx();

        if (!ctx->fiq_handler_active) {
                NOTICE("%s: fiq handler not active\n", __func__);
                SMC_RET1(handle, SM_ERR_INVALID_PARAMETERS);
        }

        ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
        if (ret.r0 != 1) {
                INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %ld\n",
                     __func__, handle, ret.r0);
        }

        /*
         * Restore register state to state recorded on fiq entry.
         *
         * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
         * restore them.
         *
         * x1-x4 and x8-x17 need to be restored here because smc_handler64
         * corrupts them (el1 code also restored them).
         */
        memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
        ctx->fiq_handler_active = 0;
        write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
        cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, ctx->fiq_cpsr);

        SMC_RET0(handle);
}

/*
 * Top-level SMC handler for the Trusty SPD. From the secure side only
 * SMC_SC_NS_RETURN is handled, returning control to the non-secure world.
 * Non-secure FIQ handler management calls are handled here; all other
 * non-secure SMCs are forwarded to Trusty.
 */
static uint64_t trusty_smc_handler(uint32_t smc_fid,
                                   uint64_t x1,
                                   uint64_t x2,
                                   uint64_t x3,
                                   uint64_t x4,
                                   void *cookie,
                                   void *handle,
                                   uint64_t flags)
{
        struct args ret;

        if (is_caller_secure(flags)) {
                if (smc_fid == SMC_SC_NS_RETURN) {
                        ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
                        SMC_RET4(handle, ret.r0, ret.r1, ret.r2, ret.r3);
                }
                INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) "
                     "cpu %d, unknown smc\n",
                     __func__, smc_fid, x1, x2, x3, x4, cookie, handle, flags,
                     plat_my_core_pos());
                SMC_RET1(handle, SMC_UNK);
        } else {
                switch (smc_fid) {
                case SMC_FC64_SET_FIQ_HANDLER:
                        return trusty_set_fiq_handler(handle, x1, x2, x3);
                case SMC_FC64_GET_FIQ_REGS:
                        return trusty_get_fiq_regs(handle);
                case SMC_FC_FIQ_EXIT:
                        return trusty_fiq_exit(handle, x1, x2, x3);
                default:
                        ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
                                                    x2, x3);
                        SMC_RET1(handle, ret.r0);
                }
        }
}

/*
 * Per-cpu initialization: build the secure context from the BL32 entry point
 * information and perform the initial entry into Trusty.
 */
static int32_t trusty_init(void)
{
        void el3_exit();
        entry_point_info_t *ep_info;
        struct trusty_cpu_ctx *ctx = get_trusty_ctx();
        uint32_t cpu = plat_my_core_pos();
        int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
                               CTX_SPSR_EL3));

        ep_info = bl31_plat_get_next_image_ep_info(SECURE);

        cm_el1_sysregs_context_save(NON_SECURE);

        cm_set_context(&ctx->cpu_ctx, SECURE);
        cm_init_my_context(ep_info);

        /*
         * Adjust secondary cpu entry point for 32 bit images to the
         * end of exception vectors
         */
        if ((cpu != 0) && (reg_width == MODE_RW_32)) {
                INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
                     cpu, ep_info->pc + (1U << 5));
                cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
        }

        cm_el1_sysregs_context_restore(SECURE);
        cm_set_next_eret_context(SECURE);

        ctx->saved_security_state = ~0; /* initial saved state is invalid */
        trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack);

        trusty_context_switch_helper(&ctx->saved_sp, 0, 0, 0, 0);

        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);

        return 0;
}

static void trusty_cpu_suspend(void)
{
        struct args ret;
        unsigned int linear_id = plat_my_core_pos();

        ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, 0, 0, 0);
        if (ret.r0 != 0) {
                INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %ld\n",
                     __func__, linear_id, ret.r0);
        }
}

static void trusty_cpu_resume(void)
{
        struct args ret;
        unsigned int linear_id = plat_my_core_pos();

        ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, 0, 0, 0);
        if (ret.r0 != 0) {
                INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %ld\n",
                     __func__, linear_id, ret.r0);
        }
}

static int32_t trusty_cpu_off_handler(uint64_t unused)
{
        trusty_cpu_suspend();

        return 0;
}

static void trusty_cpu_on_finish_handler(uint64_t unused)
{
        struct trusty_cpu_ctx *ctx = get_trusty_ctx();

        if (!ctx->saved_sp) {
                trusty_init();
        } else {
                trusty_cpu_resume();
        }
}

static void trusty_cpu_suspend_handler(uint64_t unused)
{
        trusty_cpu_suspend();
}

static void trusty_cpu_suspend_finish_handler(uint64_t unused)
{
        trusty_cpu_resume();
}

static const spd_pm_ops_t trusty_pm = {
        .svc_off = trusty_cpu_off_handler,
        .svc_suspend = trusty_cpu_suspend_handler,
        .svc_on_finish = trusty_cpu_on_finish_handler,
        .svc_suspend_finish = trusty_cpu_suspend_finish_handler,
};

static int32_t trusty_setup(void)
{
        entry_point_info_t *ep_info;
        uint32_t instr;
        uint32_t flags;
        int ret;
        int aarch32 = 0;

        ep_info = bl31_plat_get_next_image_ep_info(SECURE);
        if (!ep_info) {
                INFO("Trusty image missing.\n");
                return -1;
        }

        instr = *(uint32_t *)ep_info->pc;

        if (instr >> 24 == 0xea) {
                INFO("trusty: Found 32 bit image\n");
                aarch32 = 1;
        } else if (instr >> 8 == 0xd53810) {
                INFO("trusty: Found 64 bit image\n");
        } else {
                INFO("trusty: Found unknown image, 0x%x\n", instr);
        }

        SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
        if (!aarch32)
                ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
                                        DISABLE_ALL_EXCEPTIONS);
        else
                ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
                                            SPSR_E_LITTLE,
                                            DAIF_FIQ_BIT |
                                            DAIF_IRQ_BIT |
                                            DAIF_ABT_BIT);

        bl31_register_bl32_init(trusty_init);

        psci_register_spd_pm_hook(&trusty_pm);

        flags = 0;
        set_interrupt_rm_flag(flags, NON_SECURE);
        ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
                                              trusty_fiq_handler,
                                              flags);
        if (ret)
                ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);

        return 0;
}

/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
        trusty_fast,

        OEN_TOS_START,
        SMC_ENTITY_SECURE_MONITOR,
        SMC_TYPE_FAST,
        trusty_setup,
        trusty_smc_handler
);

/* Define a SPD runtime service descriptor for standard SMC calls */
DECLARE_RT_SVC(
        trusty_std,

        OEN_TOS_START,
        SMC_ENTITY_SECURE_MONITOR,
        SMC_TYPE_STD,
        NULL,
        trusty_smc_handler
);