xref: /OK3568_Linux_fs/kernel/arch/powerpc/kvm/book3s_hv_tm_builtin.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

/*
 * This handles the cases where the guest is in real suspend mode
 * and we want to get back to the guest without dooming the transaction.
 * The caller has checked that the guest is in real-suspend mode
 * (MSR[TS] = S and the fake-suspend flag is not set).
 */
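/*
 * For reference: the MSR[TS] field encodes the transaction state as
 * 0b00 = non-transactional (N), 0b01 = suspended (S) and
 * 0b10 = transactional (T); MSR[TM] is the separate TM-available bit.
 * Returning 1 below means the instruction has been handled here and the
 * guest can be re-entered directly; returning 0 leaves it to the normal,
 * slower TM emulation path.
 */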
int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 newmsr, msr, bescr;
	int rs;

	/*
	 * rfid, rfebb and mtmsrd encode bit 31 = 0, since it is a reserved
	 * bit in these instructions, so masking bit 31 out doesn't change
	 * them.  For the tsr. instruction, bit 31 = 0 is an invalid form
	 * per the ISA; however, the P9 UM (section 4.6.10, Book II Invalid
	 * Forms) specifically states that ignoring bit 31 is an acceptable
	 * way to handle TM-related invalid forms that have bit 31 = 0.
	 * Moreover, for emulation purposes both forms (with and without
	 * bit 31 set) can generate a softpatch interrupt.  Hence both forms
	 * of tsr. are handled below to make them behave the same way.
	 */
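	/*
	 * The switch below compares only the opcode fields of the
	 * instruction: PO_XOP_OPCODE_MASK keeps the primary and extended
	 * opcode and clears the operand fields along with bit 31, which is
	 * why a single case also matches the bit 31 = 0 form of tsr.
	 */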
	switch (instr & PO_XOP_OPCODE_MASK) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))
			return 0;
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return 1;

	case PPC_INST_RFEBB:
		/* check for PR=1 and arch 2.06 bit set in PCR */
		msr = vcpu->arch.shregs.msr;
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
			return 0;
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB) ||
		    ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))
			return 0;
		bescr = mfspr(SPRN_BESCR);
		/* expect to see a S->T transition requested */
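		/*
		 * The two-bit field tested below uses the same encoding as
		 * MSR[TS]; the value 2 (0b10) indicates a return to
		 * transactional state.
		 */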
		if (((bescr >> 30) & 3) != 2)
			return 0;
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		mtspr(SPRN_BESCR, bescr);
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
		return 1;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
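		/* RS field of the instruction: the GPR holding the new MSR */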
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		msr = vcpu->arch.shregs.msr;
		/* check this is a Sx -> T1 transition */
		if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))
			return 0;
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		return 1;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
		/* we know the MSR has the TS field = S (0b01) here */
		msr = vcpu->arch.shregs.msr;
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
			return 0;
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM))
			return 0;
		/* L=1 => tresume => set TS to T (0b10) */
		if (instr & (1 << 21))
			vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		/* Set CR0 to 0b0010 */
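		/*
		 * 0x20000000 is the EQ bit of CR field 0, so this yields
		 * CR0 = 0b0010, i.e. the pre-instruction MSR[TS] value
		 * (S = 0b01) in the middle two bits.
		 */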
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			0x20000000;
		return 1;
	}

	return 0;
}

/*
 * This is called when we are returning to a guest in TM transactional
 * state.  We roll the guest state back to the checkpointed state.
 */
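/*
 * The rollback takes the guest to where a failed transaction would resume:
 * MSR[TS] is cleared (N state), the NIP is loaded from TFHAR (the
 * transaction failure handler address), the checkpointed register values
 * are copied back into the live registers, and CR0 is set to 0b1010, the
 * value a failed transaction reports to its software failure handler.
 */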
void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shregs.msr &= ~MSR_TS_MASK;	/* go to N state */
	vcpu->arch.regs.nip = vcpu->arch.tfhar;
	copy_from_checkpoint(vcpu);
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
}