/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
 *  Stack switching code can no longer reliably rely on the fact that
 *  if we are NOT in user mode, stack is switched to kernel mode.
 *  e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
 *  its prologue, including stack switching from user mode
 *
 * Vineetg: Aug 28th 2008: Bug #94984
 *  -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
 *   Normally CPU does this automatically, however when doing FAKE rtie,
 *   we also need to explicitly do this. The problem in macros
 *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
 *   was being "CLEARED" rather than "SET".
 Actually "SET" clears the ZOL context
 *
 * Vineetg: May 5th 2008
 *  -Modified CALLEE_REG save/restore macros to handle the fact that
 *   r25 contains the kernel current task ptr
 *  -Defined Stack Switching Macro to be reused in all intr/excp hdlrs
 *  -Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
 *   address write-back load ld.ab instead of separate ld/add instructions
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef __ASM_ARC_ENTRY_COMPACT_H
#define __ASM_ARC_ENTRY_COMPACT_H

#include <asm/asm-offsets.h>
#include <asm/irqflags-compact.h>
#include <asm/thread_info.h>	/* For THREAD_SIZE */

/*--------------------------------------------------------------
 * Switch to Kernel Mode stack if SP points to User Mode stack
 *
 * Entry   : r9 contains pre-IRQ/exception/trap status32
 * Exit    : SP set to K mode stack
 *           SP at the time of entry (K/U) saved @ pt_regs->sp
 * Clobbers: r9
 *-------------------------------------------------------------*/

.macro SWITCH_TO_KERNEL_STK

	/* User Mode when this happened ? Yes: Proceed to switch stack */
	bbit1   r9, STATUS_U_BIT, 88f

	/* OK we were already in kernel mode when this event happened, thus can
	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
	 */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
	/* However....
	 * If Level 2 Interrupts enabled, we may end up with a corner case:
	 * 1. User Task executing
	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
	 * 3. But before it could switch SP from USER to KERNEL stack
	 *    a L2 IRQ "Interrupts" L1
	 * That way although L2 IRQ happened in Kernel mode, stack is still
	 * not switched.
	 * To handle this, we may need to switch stack even if in kernel mode
	 * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
	 */
	brlo sp, VMALLOC_START, 88f

	/* TODO: vineetg:
	 * We need to be a bit more cautious here. What if a kernel bug in
	 * L1 ISR, caused SP to go whacko (some small value which looks like
	 * USER stk) and then we take L2 ISR.
	 * Above brlo alone would treat it as a valid L1-L2 scenario
	 * instead of shouting around.
	 * The only feasible way is to make sure this L2 happened in
	 * L1 prologue ONLY, i.e. ilink2 is less than a pre-set marker in
	 * L1 ISR before it switches stack
	 */

#endif

	/*------Intr/Excp happened in kernel mode, SP already setup ------ */
	/* save it nevertheless @ pt_regs->sp for uniformity */

	/* delay-slot form: store SP then branch over the user-mode path */
	b.d	66f
	st	sp, [sp, PT_sp - SZ_PT_REGS]

88: /*------Intr/Excp happened in user mode, "switch" stack ------ */

	GET_CURR_TASK_ON_CPU   r9

	/* With current tsk in r9, get its kernel mode stack base */
	GET_TSK_STACK_BASE  r9, r9

	/* save U mode SP @ pt_regs->sp */
	st	sp, [r9, PT_sp - SZ_PT_REGS]

	/* final SP switch */
	mov	sp, r9
66:
.endm

/*------------------------------------------------------------
 * "FAKE" a rtie to return from CPU Exception context
 * This is to re-enable Exceptions within exception
 * Look at EV_ProtV to see how this is actually used
 *-------------------------------------------------------------*/

.macro FAKE_RET_FROM_EXCPN

	lr	r9, [status32]
	bclr	r9, r9, STATUS_AE_BIT	/* drop the "in exception" flag */
	or	r9, r9, (STATUS_E1_MASK|STATUS_E2_MASK)	/* re-enable both intr levels */
	sr	r9, [erstatus]
	mov	r9, 55f			/* "return" target is the label right below */
	sr	r9, [eret]
	rtie
55:
.endm

/*--------------------------------------------------------------
 * For early Exception/ISR Prologue, a core reg is temporarily needed to
 * code the rest of prolog (stack switching). This is done by stashing
 * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
 *
 * Before saving the full regfile - this reg is restored back, only
 * to be saved again on kernel mode stack, as part of pt_regs.
 *-------------------------------------------------------------*/
.macro PROLOG_FREEUP_REG	reg, mem
	/* FIX: condition was inverted (#ifndef): the comment above documents
	 * SCRATCH0 as the ARC_USE_SCRATCH_REG (SMP) stash and memory as the
	 * fallback, so the aux-reg path must be taken when the symbol IS
	 * defined (matches mainline arch/arc/include/asm/entry-compact.h).
	 */
#ifdef ARC_USE_SCRATCH_REG
	sr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
	st  \reg, [\mem]
#endif
.endm

.macro PROLOG_RESTORE_REG	reg, mem
	/* Must mirror PROLOG_FREEUP_REG: reload from wherever it was stashed */
#ifdef ARC_USE_SCRATCH_REG
	lr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
	ld  \reg, [\mem]
#endif
.endm

/*--------------------------------------------------------------
 * Exception Entry prologue
 * -Switches stack to K mode (if not already)
 * -Saves the register file
 *
 * After this it is safe to call the "C" handlers
 *-------------------------------------------------------------*/
.macro EXCEPTION_PROLOGUE

	/* Need at least 1 reg to code the early exception prologue */
	PROLOG_FREEUP_REG r9, @ex_saved_reg1

	/* U/K mode at time of exception (stack not switched if already K) */
	lr  r9, [erstatus]

	/* ARC700 doesn't provide auto-stack switching */
	SWITCH_TO_KERNEL_STK

#ifdef CONFIG_ARC_CURR_IN_REG
	/* Treat r25 as scratch reg (save on stack) and load with "current" */
	PUSH    r25
	GET_CURR_TASK_ON_CPU   r25
#else
	/* still reserve the pt_regs user_r25 slot to keep frame layout fixed */
	sub     sp, sp, 4
#endif

	st.a	r0, [sp, -8]    /* orig_r0 needed for syscall (skip ECR slot) */
	sub	sp, sp, 4	/* skip pt_regs->sp, already saved above */

	/* Restore r9 used to code the early prologue */
	PROLOG_RESTORE_REG  r9, @ex_saved_reg1

	/* now we are ready to save the regfile */
	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSHAX	eret
	PUSHAX	erstatus
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	erbta

	lr	r10, [ecr]
	st      r10, [sp, PT_event]    /* EV_Trap expects r10 to have ECR */
.endm

/*--------------------------------------------------------------
 * Restore all registers used by system call or Exceptions
 * SP should always be pointing to the next free stack element
 * when entering this macro.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
 * by hardware and that is not good.
 *-------------------------------------------------------------*/
.macro EXCEPTION_EPILOGUE

	/* unwind in exact reverse order of EXCEPTION_PROLOGUE pushes */
	POPAX	erbta
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	erstatus
	POPAX	eret
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

#ifdef CONFIG_ARC_CURR_IN_REG
	ld	r25, [sp, 12]	/* reload user_r25 saved by prologue */
#endif
	ld	sp, [sp] /* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
.endm

/* Dummy ECR values for Interrupts */
#define event_IRQ1		0x0031abcd
#define event_IRQ2		0x0032abcd

.macro INTERRUPT_PROLOGUE  LVL

	/* free up r9 as scratchpad */
	PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg

	/* Which mode (user/kernel) was the system in when intr occurred */
	lr  r9, [status32_l\LVL\()]

	SWITCH_TO_KERNEL_STK

#ifdef CONFIG_ARC_CURR_IN_REG
	/* Treat r25 as scratch reg (save on stack) and load with "current" */
	PUSH    r25
	GET_CURR_TASK_ON_CPU   r25
#else
	/* still reserve the pt_regs user_r25 slot to keep frame layout fixed */
	sub     sp, sp, 4
#endif

	PUSH	0x003\LVL\()abcd    /* Dummy ECR */
	sub	sp, sp, 8	/* skip orig_r0 (not needed)
				   skip pt_regs->sp, already saved above */

	/* Restore r9 used to code the early prologue */
	PROLOG_RESTORE_REG  r9, @int\LVL\()_saved_reg

	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSH	ilink\LVL\()
	PUSHAX	status32_l\LVL\()
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	bta_l\LVL\()

.endm

/*--------------------------------------------------------------
 * Restore all registers used by interrupt handlers.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
 * by hardware and that is not good.
 *-------------------------------------------------------------*/
.macro INTERRUPT_EPILOGUE  LVL

	/* unwind in exact reverse order of INTERRUPT_PROLOGUE pushes */
	POPAX	bta_l\LVL\()
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	status32_l\LVL\()
	POP	ilink\LVL\()
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

#ifdef CONFIG_ARC_CURR_IN_REG
	ld	r25, [sp, 12]	/* reload user_r25 saved by prologue */
#endif
	ld	sp, [sp] /* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
.endm

/* Get thread_info of "current" tsk */
.macro GET_CURR_THR_INFO_FROM_SP  reg
	/* kernel stack is THREAD_SIZE aligned; masking SP yields its base */
	bic \reg, sp, (THREAD_SIZE - 1)
.endm

/* Get CPU-ID of this core */
.macro GET_CPU_ID  reg
	/* extract bits [15:8] of the IDENTITY aux register */
	lr  \reg, [identity]
	lsr \reg, \reg, 8
	bmsk \reg, \reg, 7
.endm

#endif  /* __ASM_ARC_ENTRY_COMPACT_H */