/*
 * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/pmf/aarch32/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>
#include <smccc_macros.S>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
	.globl	sp_min_handle_smc
	.globl	sp_min_handle_fiq

/* Size of the image, used for the PIE dynamic relocation fixup pass. */
#define FIXUP_SIZE	((BL32_LIMIT) - (BL32_BASE))

	.macro route_fiq_to_sp_min reg
	/* -----------------------------------------------------
	 * FIQs are secure interrupts trapped by Monitor and non
	 * secure is not allowed to mask the FIQs.
	 * -----------------------------------------------------
	 */
	ldcopr	\reg, SCR
	orr	\reg, \reg, #SCR_FIQ_BIT
	bic	\reg, \reg, #SCR_FW_BIT
	stcopr	\reg, SCR
	.endm

	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the exclusive access when
	 * entering Monitor mode.
	 */
	clrex
#endif
	.endm

vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	sp_min_handle_smc	/* Syscall */
	b	report_prefetch_abort	/* Prefetch abort */
	b	report_data_abort	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	sp_min_handle_fiq	/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * Preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure & r1 with a pointer to platform
	 * specific structure
	 * ---------------------------------------------------------------
	 */
	mov	r9, r0
	mov	r10, r1
	mov	r11, r2
	mov	r12, r3

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=FIXUP_SIZE

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=FIXUP_SIZE

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r9, #0
	mov	r10, #0
	mov	r11, #0
	mov	r12, #0

#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

	mov	r0, r9
	mov	r1, r10
	mov	r2, r11
	mov	r3, r12
	bl	sp_min_early_platform_setup2
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func sp_min_handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it on top of the C runtime stack.
	 * The value will be saved to the per-cpu data once the C stack is
	 * available, as a valid stack is needed to call _cpu_data()
	 */
	strd	r0, r1, [sp, #SMC_CTX_GPREG_R0]
	ldcopr16	r0, r1, CNTPCT_64
	ldr	lr, [sp, #SMC_CTX_SP_MON]
	strd	r0, r1, [lr, #-8]!
	str	lr, [sp, #SMC_CTX_SP_MON]
	ldrd	r0, r1, [sp, #SMC_CTX_GPREG_R0]
#endif

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Save handle to a callee saved register */
	mov	r6, r2

	/*
	 * Restore the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	ldrd	r4, r5, [sp], #8
	bl	_cpu_data
	strd	r4, r5, [r0, #CPU_DATA_PMF_TS0_OFFSET]

	/* Restore handle */
	mov	r2, r6
#endif

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* SMC32 is not detected. Return error back to caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* SMC32 is detected */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_handle_smc

/*
 * Secure Interrupts handling function for SP_MIN.
 */
func sp_min_handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b	plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smccc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* load run-time stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc sp_min_handle_fiq

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * This timestamp update happens with cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	ldcopr16	r2, r3, CNTPCT_64
	strd	r2, r3, [r0]
#endif
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *   programming the reset address do we need to initialise the SCTLR.
	 *   In other cases, we assume this has been taken care by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table		\
		_pie_fixup_size=0

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	r0, #0
#else
	mov	r0, #DISABLE_DCACHE
#endif
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Save smc_ctx_t */
	mov	r5, r0

	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	r4, r0

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
	mov	r1, #PMF_TS_SIZE
	bl	inv_dcache_range

	ldcopr16	r0, r1, CNTPCT_64
	strd	r0, r1, [r4]

	/* Restore smc_ctx_t */
	mov	r0, r5
#endif

	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from SMC context and return
 * to the mode restored to SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	monitor_exit
endfunc sp_min_exit