/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#define SHADOW_SLB_ENTRY_LEN	0x10
#define OFFSET_ESID(x)		(SHADOW_SLB_ENTRY_LEN * x)
#define OFFSET_VSID(x)		((SHADOW_SLB_ENTRY_LEN * x) + 8)

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRS = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */

BEGIN_FW_FTR_SECTION

	/* Declare SLB shadow as 0 entries big */

	ld	r11, PACA_SLBSHADOWPTR(r13)
	li	r8, 0
	stb	r8, 3(r11)

END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)

	/* Flush SLB */

	li	r10, 0
	slbmte	r10, r10
	slbia

	/* Fill SLB with our shadow */

	lbz	r12, SVCPU_SLB_MAX(r3)
	mulli	r12, r12, 16
	addi	r12, r12, SVCPU_SLB
	add	r12, r12, r3

	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
	li	r11, SVCPU_SLB
	add	r11, r11, r3

slb_loop_enter:

	ld	r10, 0(r11)
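	/* Only load valid entries: SLB_ESID_V is the valid bit in the
	 * ESID word, so skip the slbmte when it is clear. */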
	andis.	r9, r10, SLB_ESID_V@h
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter

slb_do_enter:

.endm

/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Remove all SLB entries that are in use. */

	li	r0, 0
	slbmte	r0, r0
	slbia

	/* Restore bolted entries from the shadow */

	ld	r11, PACA_SLBSHADOWPTR(r13)

BEGIN_FW_FTR_SECTION

	/* Declare SLB shadow as SLB_NUM_BOLTED entries big */

	li	r8, SLB_NUM_BOLTED
	stb	r8, 3(r11)

END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)

	/* Manually load all entries from shadow SLB */

	li	r8, SLBSHADOW_SAVEAREA
	li	r7, SLBSHADOW_SAVEAREA + 8

	.rept	SLB_NUM_BOLTED
	LDX_BE	r10, r11, r8
	cmpdi	r10, 0
	beq	1f
	LDX_BE	r9, r11, r7
	slbmte	r9, r10
1:	addi	r7, r7, SHADOW_SLB_ENTRY_LEN
	addi	r8, r8, SHADOW_SLB_ENTRY_LEN
	.endr

	isync
	sync

slb_do_exit:

.endm
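/*
 * Usage sketch (illustrative, not part of the code above): these macros are
 * expanded by the Book3S PR real-mode trampolines (e.g. book3s_segment.S),
 * which are expected to establish the register contracts documented at the
 * top of each macro before expanding it:
 *
 *	LOAD_GUEST_SEGMENTS	# MSR.IR/DR clear, r3 = shadow vcpu
 *	# ... switch to guest context, run until an interrupt exits ...
 *	LOAD_HOST_SEGMENTS	# r12 = exit handler id, r13 = PACA
 */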