// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/adjust_pc.h>

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>
#include <linux/swab.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

static bool __is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT);

	return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
}

/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *				     guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access successfully performed
 */
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t fault_ipa;
	void __iomem *addr;
	int rd;

	/* Build the full address */
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	/* If not for GICV, move on */
	if (fault_ipa <  vgic->vgic_cpu_base ||
	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
		return 0;

	/* Reject anything but a 32bit access */
	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) {
		__kvm_skip_instr(vcpu);
		return -1;
	}

	/* Not aligned? Don't bother */
	if (fault_ipa & 3) {
		__kvm_skip_instr(vcpu);
		return -1;
	}

	rd = kvm_vcpu_dabt_get_rd(vcpu);
	addr  = kvm_vgic_global_state.vcpu_hyp_va;
	addr += fault_ipa - vgic->vgic_cpu_base;

	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		u32 data = vcpu_get_reg(vcpu, rd);
		if (__is_be(vcpu)) {
			/* guest pre-swabbed data, undo this for writel() */
			data = __kvm_swab32(data);
		}
		writel_relaxed(data, addr);
	} else {
		u32 data = readl_relaxed(addr);
		if (__is_be(vcpu)) {
			/* guest expects swabbed data */
			data = __kvm_swab32(data);
		}
		vcpu_set_reg(vcpu, rd, data);
	}

	__kvm_skip_instr(vcpu);

	return 1;
}
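
/*
 * Illustrative note (not part of the kernel source): the __kvm_swab32()
 * calls above exist because the hypervisor runs little-endian, while a
 * big-endian guest (PSR.E for AArch32, SCTLR_ELx.EE for AArch64) expects
 * the 32-bit GICV word with its bytes reversed. A minimal standalone
 * sketch of that byte swap, using hypothetical names and compilable as
 * ordinary userspace C, kept inside this comment so it does not alter
 * the translation unit:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static uint32_t swab32_sketch(uint32_t x)
 *	{
 *		return ((x & 0x000000ffu) << 24) |
 *		       ((x & 0x0000ff00u) <<  8) |
 *		       ((x & 0x00ff0000u) >>  8) |
 *		       ((x & 0xff000000u) >> 24);
 *	}
 *
 *	int main(void)
 *	{
 *		uint32_t v = 0x000000ffu;	// e.g. a priority-mask read
 *
 *		printf("LE view: 0x%08x  BE view: 0x%08x\n",
 *		       v, swab32_sketch(v));
 *		return 0;
 *	}
 */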