/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <bl32.h>
#include <tsp.h>
#include <arch_helpers.h>
#include <stdio.h>
#include <platform.h>
#include <debug.h>
#include <spinlock.h>

/*******************************************************************************
 * Lock to control access to the console
 ******************************************************************************/
spinlock_t console_lock;

/*******************************************************************************
 * Per-cpu data structure used to populate the parameters for an SMC in C
 * code. Assembler code uses a pointer to this structure to populate x0-x7.
 ******************************************************************************/
static tsp_args tsp_smc_args[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Per-cpu data structure to keep track of TSP activity
 ******************************************************************************/
static work_statistics tsp_stats[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Single reference to the various entry points exported by the test secure
 * payload. A single copy should suffice for all cpus as they are not expected
 * to change.
 ******************************************************************************/
static const entry_info tsp_entry_info = {
	tsp_fast_smc_entry,
	tsp_cpu_on_entry,
	tsp_cpu_off_entry,
	tsp_cpu_resume_entry,
	tsp_cpu_suspend_entry,
};
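/*******************************************************************************
 * Helper to store the outgoing SMC arguments in this cpu's entry of
 * 'tsp_smc_args' and return a pointer to it. The TSP's assembler code uses
 * this pointer to populate x0-x7 before returning to the Secure Monitor.
 ******************************************************************************/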
static tsp_args *set_smc_args(uint64_t arg0,
			      uint64_t arg1,
			      uint64_t arg2,
			      uint64_t arg3,
			      uint64_t arg4,
			      uint64_t arg5,
			      uint64_t arg6,
			      uint64_t arg7)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id;
	tsp_args *pcpu_smc_args;

	/*
	 * Return to the Secure Monitor by raising an SMC. The results of the
	 * service are passed as arguments to the SMC.
	 */
	linear_id = platform_get_core_pos(mpidr);
	pcpu_smc_args = &tsp_smc_args[linear_id];
	write_sp_arg(pcpu_smc_args, TSP_ARG0, arg0);
	write_sp_arg(pcpu_smc_args, TSP_ARG1, arg1);
	write_sp_arg(pcpu_smc_args, TSP_ARG2, arg2);
	write_sp_arg(pcpu_smc_args, TSP_ARG3, arg3);
	write_sp_arg(pcpu_smc_args, TSP_ARG4, arg4);
	write_sp_arg(pcpu_smc_args, TSP_ARG5, arg5);
	write_sp_arg(pcpu_smc_args, TSP_ARG6, arg6);
	write_sp_arg(pcpu_smc_args, TSP_ARG7, arg7);

	return pcpu_smc_args;
}

/*******************************************************************************
 * TSP main entry point where it gets the opportunity to initialize its secure
 * state/applications. Once the state is initialized, it must return to the
 * SPD with a pointer to the 'tsp_entry_info' structure.
 ******************************************************************************/
uint64_t tsp_main(void)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);

#if DEBUG
	meminfo *mem_layout = bl32_plat_sec_mem_layout();
#endif

	/* Initialize the platform */
	bl32_platform_setup();

	/* Initialize secure/applications state here */

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

	spin_lock(&console_lock);
#if defined (__GNUC__)
	printf("TSP Built : %s, %s\n\r", __TIME__, __DATE__);
#endif
	INFO("Total memory base : 0x%x\n", mem_layout->total_base);
	INFO("Total memory size : 0x%x bytes\n", mem_layout->total_size);
	INFO("Free memory base : 0x%x\n", mem_layout->free_base);
	INFO("Free memory size : 0x%x bytes\n", mem_layout->free_size);
	INFO("cpu 0x%x: %d smcs, %d erets %d cpu on requests\n", mpidr,
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count,
	     tsp_stats[linear_id].cpu_on_count);
	spin_unlock(&console_lock);

	/*
	 * TODO: There is a massive assumption that the SPD and SP can see each
	 * other's memory without issues so it is safe to pass pointers to
	 * internal memory. Replace this with a shared communication buffer.
	 */
	return (uint64_t) &tsp_entry_info;
}
/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * after this cpu's architectural state has been set up in response to an
 * earlier psci cpu_on request.
 ******************************************************************************/
tsp_args *tsp_cpu_on_main(void)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

	spin_lock(&console_lock);
	printf("SP: cpu 0x%x turned on\n\r", mpidr);
	INFO("cpu 0x%x: %d smcs, %d erets %d cpu on requests\n", mpidr,
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count,
	     tsp_stats[linear_id].cpu_on_count);
	spin_unlock(&console_lock);

	/* Indicate to the SPD that we have finished turning ourselves on */
	return set_smc_args(TSP_ON_DONE, 0, 0, 0, 0, 0, 0, 0);
}
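/*
 * Note: the cpu off, suspend and resume handlers below follow the same
 * pattern as tsp_cpu_on_main() - update this cpu's statistics, log the event
 * while holding the console lock and hand the corresponding TSP_*_DONE code
 * back to the SPD via set_smc_args().
 */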
/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * before this cpu is turned off in response to a psci cpu_off request.
 ******************************************************************************/
tsp_args *tsp_cpu_off_main(uint64_t arg0,
			   uint64_t arg1,
			   uint64_t arg2,
			   uint64_t arg3,
			   uint64_t arg4,
			   uint64_t arg5,
			   uint64_t arg6,
			   uint64_t arg7)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_off_count++;

	spin_lock(&console_lock);
	printf("SP: cpu 0x%x off request\n\r", mpidr);
	INFO("cpu 0x%x: %d smcs, %d erets %d cpu off requests\n", mpidr,
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count,
	     tsp_stats[linear_id].cpu_off_count);
	spin_unlock(&console_lock);

	/*
	 * Indicate to the SPD that we have completed this request.
	 */
	return set_smc_args(TSP_OFF_DONE, 0, 0, 0, 0, 0, 0, 0);
}
/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload before
 * this cpu's architectural state is saved in response to an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
tsp_args *tsp_cpu_suspend_main(uint64_t power_state,
			       uint64_t arg1,
			       uint64_t arg2,
			       uint64_t arg3,
			       uint64_t arg4,
			       uint64_t arg5,
			       uint64_t arg6,
			       uint64_t arg7)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_suspend_count++;

	spin_lock(&console_lock);
	printf("SP: cpu 0x%x suspend request. power state: 0x%x\n\r",
	       mpidr, power_state);
	INFO("cpu 0x%x: %d smcs, %d erets %d cpu suspend requests\n", mpidr,
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count,
	     tsp_stats[linear_id].cpu_suspend_count);
	spin_unlock(&console_lock);

	/*
	 * Indicate to the SPD that we have completed this request.
	 */
	return set_smc_args(TSP_SUSPEND_DONE, 0, 0, 0, 0, 0, 0, 0);
}

/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload after this
 * cpu's architectural state has been restored after wakeup from an earlier
 * psci cpu_suspend request.
 ******************************************************************************/
tsp_args *tsp_cpu_resume_main(uint64_t suspend_level,
			      uint64_t arg1,
			      uint64_t arg2,
			      uint64_t arg3,
			      uint64_t arg4,
			      uint64_t arg5,
			      uint64_t arg6,
			      uint64_t arg7)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_resume_count++;

	spin_lock(&console_lock);
	printf("SP: cpu 0x%x resumed. suspend level %d\n\r",
	       mpidr, suspend_level);
	INFO("cpu 0x%x: %d smcs, %d erets %d cpu suspend requests\n", mpidr,
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count,
	     tsp_stats[linear_id].cpu_suspend_count);
	spin_unlock(&console_lock);

	/*
	 * Indicate to the SPD that we have completed this request.
	 */
	return set_smc_args(TSP_RESUME_DONE, 0, 0, 0, 0, 0, 0, 0);
}
/*******************************************************************************
 * TSP fast smc handler. The secure monitor jumps to this function by
 * doing the ERET after populating X0-X7 registers. The arguments are received
 * in the function arguments in order. Once the service is rendered, this
 * function returns to the Secure Monitor by raising an SMC.
 ******************************************************************************/
tsp_args *tsp_fast_smc_handler(uint64_t func,
			       uint64_t arg1,
			       uint64_t arg2,
			       uint64_t arg3,
			       uint64_t arg4,
			       uint64_t arg5,
			       uint64_t arg6,
			       uint64_t arg7)
{
	uint64_t results[4];
	uint64_t service_args[4];

	INFO("Received fast smc 0x%x on cpu 0x%x\n", func, read_mpidr());

	/* Render secure services and obtain results here */

	results[0] = arg1;
	results[1] = arg2;
	results[2] = arg3;
	results[3] = arg4;

	/*
	 * Request a service back from the dispatcher/secure monitor. This
	 * call returns and execution resumes thereafter.
	 */
	tsp_get_magic(service_args);

	/* Determine the function to perform based on the function ID */
	switch (func) {
	case TSP_FID_ADD:
		results[0] += service_args[0];
		results[1] += service_args[1];
		results[2] += service_args[2];
		results[3] += service_args[3];
		break;
	case TSP_FID_SUB:
		results[0] -= service_args[0];
		results[1] -= service_args[1];
		results[2] -= service_args[2];
		results[3] -= service_args[3];
		break;
	case TSP_FID_MUL:
		results[0] *= service_args[0];
		results[1] *= service_args[1];
		results[2] *= service_args[2];
		results[3] *= service_args[3];
		break;
	case TSP_FID_DIV:
		results[0] /= service_args[0] ? service_args[0] : 1;
		results[1] /= service_args[1] ? service_args[1] : 1;
		results[2] /= service_args[2] ? service_args[2] : 1;
		results[3] /= service_args[3] ? service_args[3] : 1;
		break;
	default:
		break;
	}

	return set_smc_args(TSP_WORK_DONE,
			    results[0],
			    results[1],
			    results[2],
			    results[3],
			    0, 0, 0);
}
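/*
 * Illustrative note: for a TSP_FID_ADD fast SMC carrying the operands
 * (1, 2, 3, 4), with tsp_get_magic() filling (10, 20, 30, 40) from the
 * dispatcher, the handler above replies with TSP_WORK_DONE and the results
 * (11, 22, 33, 44). TSP_FID_SUB, TSP_FID_MUL and TSP_FID_DIV follow the same
 * element-wise pattern, with a zero divisor treated as 1 in the division
 * case.
 */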