/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* The host stack layout: */
#define HOST_R1		0 /* Implied by stwu. */
#define HOST_CALLEE_LR	4
#define HOST_RUN	8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2		12
#define HOST_CR		16
#define HOST_NV_GPRS	20
#define __HOST_NV_GPR(n)	(HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_NV_GPR(n)		__HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE	(HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE		(((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR		(HOST_STACK_SIZE + 4) /* In caller stack frame. */
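
/*
 * Worked example of the offsets above (assuming __REG_R31 expands to 31,
 * per <asm/ppc_asm.h>):
 *
 *	HOST_NV_GPR(R14)    = 20 + (14 - 14) * 4 = 20
 *	HOST_NV_GPR(R31)    = 20 + (31 - 14) * 4 = 88
 *	HOST_MIN_STACK_SIZE = 88 + 4 = 92
 *	HOST_STACK_SIZE     = ((92 + 15) / 16) * 16 = 96  (16-byte aligned)
 *	HOST_STACK_LR       = 96 + 4 = 100  (LR save word in the caller's frame)
 */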

#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_ALIGNMENT))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                       (1<<BOOKE_INTERRUPT_ALIGNMENT))
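
/*
 * kvmppc_resume_host below turns the exit number in r5 into a one-hot
 * bit and tests it against these masks:
 *
 *	li	r6, 1
 *	slw	r6, r6, r5		(r6 = 1 << exit_nr)
 *	andi.	r7, r6, NEED_INST_MASK
 *
 * so the faulting instruction, DEAR and ESR are captured only on exits
 * that actually need them for emulation.
 */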

.macro __KVM_HANDLER ivor_nr scratch srr0
	/* Get pointer to vcpu and record exit number. */
	mtspr	\scratch, r4	/* Park guest r4; we need the register for the vcpu pointer. */
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_GPR(R3)(r4)
	stw	r5, VCPU_GPR(R5)(r4)
	stw	r6, VCPU_GPR(R6)(r4)
	mfspr	r3, \scratch	/* Recover guest r4 from the scratch SPRG. */
	mfctr	r5
	stw	r3, VCPU_GPR(R4)(r4)
	stw	r5, VCPU_CTR(r4)
	mfspr	r3, \srr0
	lis	r6, kvmppc_resume_host@h
	stw	r3, VCPU_PC(r4)
	li	r5, \ivor_nr
	ori	r6, r6, kvmppc_resume_host@l
	mtctr	r6
	bctr
.endm
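
/*
 * At the bctr, guest r3-r6, CTR and PC have been saved to the vcpu,
 * r4 holds the vcpu pointer and r5 the exit number; all remaining guest
 * state is still live in registers and is saved by kvmppc_resume_host.
 */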

.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	__KVM_HANDLER	\ivor_nr \scratch \srr0
.endm

.macro KVM_DBG_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	mtspr	\scratch, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_CRIT_SAVE(r4)
	mfcr	r3
	mfspr	r4, SPRN_CSRR1
	andi.	r4, r4, MSR_PR
	bne	1f
	/* debug interrupt happened in enter/exit path */
	mfspr	r4, SPRN_CSRR1
	rlwinm	r4, r4, 0, ~MSR_DE
	mtspr	SPRN_CSRR1, r4
	lis	r4, 0xffff
	ori	r4, r4, 0xffff
	mtspr	SPRN_DBSR, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	mtcr	r3
	lwz	r3, VCPU_CRIT_SAVE(r4)
	mfspr	r4, \scratch
	rfci
1:	/* debug interrupt happened in guest */
	mtcr	r3
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	lwz	r3, VCPU_CRIT_SAVE(r4)
	mfspr	r4, \scratch
	__KVM_HANDLER	\ivor_nr \scratch \srr0
.endm

.macro KVM_HANDLER_ADDR ivor_nr
	.long	kvmppc_handler_\ivor_nr
.endm

.macro KVM_HANDLER_END
	.long	kvmppc_handlers_end
.endm

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
_GLOBAL(kvmppc_handlers_end)
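
/*
 * At module init the host typically copies the kvmppc_handlers_start ..
 * kvmppc_handlers_end block to its own page and points IVPR at that copy
 * while a guest runs (see the kvmppc_booke_handlers load in
 * lightweight_exit below); kvmppc_booke_handler_addr at the end of this
 * file records the per-IVOR entry points within the block.
 */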

/* Registers:
 * SPRG_SCRATCH0: guest r4
 * r4: vcpu pointer
 * r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	mfcr	r3
	stw	r3, VCPU_CR(r4)
	stw	r7, VCPU_GPR(R7)(r4)
	stw	r8, VCPU_GPR(R8)(r4)
	stw	r9, VCPU_GPR(R9)(r4)

	li	r6, 1
	slw	r6, r6, r5
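
	/*
	 * Reading the 64-bit timebase on 32-bit hardware takes the
	 * classic TBU/TBL/TBU sequence: if the upper half changed
	 * between the two reads, TBL wrapped in between, so retry.
	 */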
#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
1:
	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	bne	1b
	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif
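
	/*
	 * The faulting guest instruction must be fetched through the
	 * guest translation: MSR[DS] is set briefly so that the lwz
	 * below is translated in the address space the guest runs in,
	 * then MSR is restored and resynchronized with isync.
	 */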
	/* Save the faulting instruction and all GPRs for emulation. */
	andi.	r7, r6, NEED_INST_MASK
	beq	..skip_inst_copy
	mfspr	r9, SPRN_SRR0
	mfmsr	r8
	ori	r7, r8, MSR_DS
	mtmsr	r7
	isync
	lwz	r9, 0(r9)
	mtmsr	r8
	isync
	stw	r9, VCPU_LAST_INST(r4)

	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:

	/* Also grab DEAR and ESR before the host can clobber them. */

	andi.	r7, r6, NEED_DEAR_MASK
	beq	..skip_dear
	mfspr	r9, SPRN_DEAR
	stw	r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

	andi.	r7, r6, NEED_ESR_MASK
	beq	..skip_esr
	mfspr	r9, SPRN_ESR
	stw	r9, VCPU_FAULT_ESR(r4)
..skip_esr:

	/* Save remaining volatile guest register state to vcpu. */
	stw	r0, VCPU_GPR(R0)(r4)
	stw	r1, VCPU_GPR(R1)(r4)
	stw	r2, VCPU_GPR(R2)(r4)
	stw	r10, VCPU_GPR(R10)(r4)
	stw	r11, VCPU_GPR(R11)(r4)
	stw	r12, VCPU_GPR(R12)(r4)
	stw	r13, VCPU_GPR(R13)(r4)
	stw	r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
	mflr	r3
	stw	r3, VCPU_LR(r4)
	mfxer	r3
	stw	r3, VCPU_XER(r4)

	/* Restore host stack pointer and PID before IVPR, since the host
	 * exception handlers use them. */
	lwz	r1, VCPU_HOST_STACK(r4)
	lwz	r3, VCPU_HOST_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	/* we cheat and know that Linux doesn't use PID1 which is always 0 */
	lis	r3, 0
	mtspr	SPRN_PID1, r3
#endif

	/* Restore host IVPR before re-enabling interrupts. We cheat and know
	 * that Linux IVPR is always 0xc0000000. */
	lis	r3, 0xc000
	mtspr	SPRN_IVPR, r3

	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	mr	r3, r4
	lwz	r2, HOST_R2(r1)
	mr	r14, r4 /* Save vcpu pointer. */

	bctrl	/* kvmppc_handle_exit() */

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	lwz	r14, VCPU_GPR(R14)(r4)

	/* Sometimes instruction emulation must restore complete GPR state. */
	andi.	r5, r3, RESUME_FLAG_NV
	beq	..skip_nv_load
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)
..skip_nv_load:

	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */
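
/*
 * Two ways out from here: heavyweight_exit unwinds all the way back to
 * __kvmppc_vcpu_run's caller and restores host non-volatile state, while
 * lightweight_exit (further below) re-enters the guest directly.
 */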
heavyweight_exit:
	/* Not returning to guest. */

#ifdef CONFIG_SPE
	/* save guest SPEFSCR and load host SPEFSCR */
	mfspr	r9, SPRN_SPEFSCR
	stw	r9, VCPU_SPEFSCR(r4)
	lwz	r9, VCPU_HOST_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r9
#endif

	/* We already saved guest volatile register state; now save the
	 * non-volatiles. */
	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)

	/* Load host non-volatile register state from host stack. */
	lwz	r14, HOST_NV_GPR(R14)(r1)
	lwz	r15, HOST_NV_GPR(R15)(r1)
	lwz	r16, HOST_NV_GPR(R16)(r1)
	lwz	r17, HOST_NV_GPR(R17)(r1)
	lwz	r18, HOST_NV_GPR(R18)(r1)
	lwz	r19, HOST_NV_GPR(R19)(r1)
	lwz	r20, HOST_NV_GPR(R20)(r1)
	lwz	r21, HOST_NV_GPR(R21)(r1)
	lwz	r22, HOST_NV_GPR(R22)(r1)
	lwz	r23, HOST_NV_GPR(R23)(r1)
	lwz	r24, HOST_NV_GPR(R24)(r1)
	lwz	r25, HOST_NV_GPR(R25)(r1)
	lwz	r26, HOST_NV_GPR(R26)(r1)
	lwz	r27, HOST_NV_GPR(R27)(r1)
	lwz	r28, HOST_NV_GPR(R28)(r1)
	lwz	r29, HOST_NV_GPR(R29)(r1)
	lwz	r30, HOST_NV_GPR(R30)(r1)
	lwz	r31, HOST_NV_GPR(R31)(r1)

	/* Return to kvm_vcpu_run(). */
	lwz	r4, HOST_STACK_LR(r1)
	lwz	r5, HOST_CR(r1)
	addi	r1, r1, HOST_STACK_SIZE
	mtlr	r4
	mtcr	r5
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr


/* Registers:
 * r3: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r3)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	mr	r4, r3
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)
	mfcr	r5
	stw	r5, HOST_CR(r1)

	/* Save host non-volatile register state to stack. */
	stw	r14, HOST_NV_GPR(R14)(r1)
	stw	r15, HOST_NV_GPR(R15)(r1)
	stw	r16, HOST_NV_GPR(R16)(r1)
	stw	r17, HOST_NV_GPR(R17)(r1)
	stw	r18, HOST_NV_GPR(R18)(r1)
	stw	r19, HOST_NV_GPR(R19)(r1)
	stw	r20, HOST_NV_GPR(R20)(r1)
	stw	r21, HOST_NV_GPR(R21)(r1)
	stw	r22, HOST_NV_GPR(R22)(r1)
	stw	r23, HOST_NV_GPR(R23)(r1)
	stw	r24, HOST_NV_GPR(R24)(r1)
	stw	r25, HOST_NV_GPR(R25)(r1)
	stw	r26, HOST_NV_GPR(R26)(r1)
	stw	r27, HOST_NV_GPR(R27)(r1)
	stw	r28, HOST_NV_GPR(R28)(r1)
	stw	r29, HOST_NV_GPR(R29)(r1)
	stw	r30, HOST_NV_GPR(R30)(r1)
	stw	r31, HOST_NV_GPR(R31)(r1)

	/* Load guest non-volatiles. */
	lwz	r14, VCPU_GPR(R14)(r4)
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)

#ifdef CONFIG_SPE
	/* save host SPEFSCR and load guest SPEFSCR */
	mfspr	r3, SPRN_SPEFSCR
	stw	r3, VCPU_HOST_SPEFSCR(r4)
	lwz	r3, VCPU_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r3
#endif

lightweight_exit:
	stw	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_SHADOW_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	lwz	r3, VCPU_SHADOW_PID1(r4)
	mtspr	SPRN_PID1, r3
#endif

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(R0)(r4)
	lwz	r2, VCPU_GPR(R2)(r4)
	lwz	r9, VCPU_GPR(R9)(r4)
	lwz	r10, VCPU_GPR(R10)(r4)
	lwz	r11, VCPU_GPR(R11)(r4)
	lwz	r12, VCPU_GPR(R12)(r4)
	lwz	r13, VCPU_GPR(R13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	lwz	r5, VCPU_SHARED(r4)

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(R1)(r4)

	/*
	 * Host interrupt handlers may have clobbered these
	 * guest-readable SPRGs, or the guest kernel may have
	 * written directly to the shared area, so we
	 * need to reload them here with the guest's values.
	 */
	PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
	mtspr	SPRN_SPRG4W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
	mtspr	SPRN_SPRG5W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
	mtspr	SPRN_SPRG6W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
	mtspr	SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r7, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	bne	1b
	stw	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	lwz	r5, VCPU_CR(r4)
	lwz	r6, VCPU_PC(r4)
	lwz	r7, VCPU_SHADOW_MSR(r4)
	mtctr	r3
	mtcr	r5
	mtsrr0	r6
	mtsrr1	r7
	lwz	r5, VCPU_GPR(R5)(r4)
	lwz	r6, VCPU_GPR(R6)(r4)
	lwz	r7, VCPU_GPR(R7)(r4)
	lwz	r8, VCPU_GPR(R8)(r4)

	/* Clear any debug events which occurred since we disabled MSR[DE].
	 * DBSR bits are write-one-to-clear, so storing all ones acknowledges
	 * every pending event.
	 * XXX This gives us a 3-instruction window in which a breakpoint
	 * intended for guest context could fire in the host instead. */
	lis	r3, 0xffff
	ori	r3, r3, 0xffff
	mtspr	SPRN_DBSR, r3

	lwz	r3, VCPU_GPR(R3)(r4)
	lwz	r4, VCPU_GPR(R4)(r4)
	rfi

	.data
	.align	4
	.globl	kvmppc_booke_handler_addr
kvmppc_booke_handler_addr:
KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND
KVM_HANDLER_END /* Always keep this at the end. */

#ifdef CONFIG_SPE
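/*
 * SPE has no direct move-from-accumulator instruction, so the 64-bit
 * accumulator is read with a multiply-accumulate of zero: evxor zeroes
 * evr6, then evmwumiaa computes evr6 * evr6 + ACC (= ACC) into evr6.
 * evmra performs the reverse, initializing ACC from a register.
 */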
_GLOBAL(kvmppc_save_guest_spe)
	cmpi	0, r3, 0
	beqlr-
	SAVE_32EVRS(0, r4, r3, VCPU_EVR)
	evxor	evr6, evr6, evr6
	evmwumiaa evr6, evr6, evr6
	li	r4, VCPU_ACC
	evstddx	evr6, r4, r3		/* save acc */
	blr

_GLOBAL(kvmppc_load_guest_spe)
	cmpi	0, r3, 0
	beqlr-
	li	r4, VCPU_ACC
	evlddx	evr6, r4, r3
	evmra	evr6, evr6		/* load acc */
	REST_32EVRS(0, r4, r3, VCPU_EVR)
	blr
#endif