xref: /OK3568_Linux_fs/kernel/arch/mips/kvm/dyntrans.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * This file is subject to the terms and conditions of the GNU General Public
3*4882a593Smuzhiyun  * License.  See the file "COPYING" in the main directory of this archive
4*4882a593Smuzhiyun  * for more details.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
9*4882a593Smuzhiyun  * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/errno.h>
13*4882a593Smuzhiyun #include <linux/err.h>
14*4882a593Smuzhiyun #include <linux/highmem.h>
15*4882a593Smuzhiyun #include <linux/kvm_host.h>
16*4882a593Smuzhiyun #include <linux/uaccess.h>
17*4882a593Smuzhiyun #include <linux/vmalloc.h>
18*4882a593Smuzhiyun #include <linux/fs.h>
19*4882a593Smuzhiyun #include <linux/memblock.h>
20*4882a593Smuzhiyun #include <asm/cacheflush.h>
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #include "commpage.h"
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun /**
25*4882a593Smuzhiyun  * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
26*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
27*4882a593Smuzhiyun  * @opc:	PC of instruction to replace.
28*4882a593Smuzhiyun  * @replace:	Instruction to write
29*4882a593Smuzhiyun  */
static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
				  union mips_instruction replace)
{
	unsigned long vaddr = (unsigned long)opc;
	int ret;

	for (;;) {
		/*
		 * The GVA page table is still active, so the write can go
		 * through the normal Linux TLB handlers.
		 */
		kvm_trap_emul_gva_lockless_begin(vcpu);
		ret = put_user(replace.word, opc);
		kvm_trap_emul_gva_lockless_end(vcpu);

		if (likely(!ret))
			break;

		/*
		 * Clean pages are write protected in the GVA page table so
		 * the normal Linux TLB mod handler doesn't silently dirty
		 * them. We may also have raced with a GVA invalidation.
		 * Either way, force the page to become dirty and retry; the
		 * retry will typically take a TLB refill which picks up the
		 * now-dirty entry and succeeds.
		 */
		ret = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
		if (unlikely(ret)) {
			kvm_info("%s: Address unwriteable: %p\n",
				 __func__, opc);
			return -EFAULT;
		}
	}

	/* Make the patched instruction visible to instruction fetch. */
	__local_flush_icache_user_range(vaddr, vaddr + 4);

	return 0;
}
67*4882a593Smuzhiyun 
int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu)
{
	/* NOP is the all-zero encoding on MIPS. */
	union mips_instruction nop = { 0 };

	/* Index-type CACHE ops need no emulation; just patch in a NOP. */
	return kvm_mips_trans_replace(vcpu, opc, nop);
}
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun /*
78*4882a593Smuzhiyun  * Address based CACHE instructions are transformed into synci(s). A little
79*4882a593Smuzhiyun  * heavy for just D-cache invalidates, but avoids an expensive trap
80*4882a593Smuzhiyun  */
int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
			    struct kvm_vcpu *vcpu)
{
	union mips_instruction synci = { 0 };

	/* SYNCI shares the bcond major opcode, selected via the rt field. */
	synci.i_format.opcode = bcond_op;
	synci.i_format.rs = inst.i_format.rs;
	synci.i_format.rt = synci_op;
	/* R6 encodes CACHE as spec3, so the offset lives in a different field. */
	synci.i_format.simmediate = cpu_has_mips_r6 ?
		inst.spec3_format.simmediate : inst.i_format.simmediate;

	return kvm_mips_trans_replace(vcpu, opc, synci);
}
96*4882a593Smuzhiyun 
int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			struct kvm_vcpu *vcpu)
{
	union mips_instruction repl = { 0 };
	u32 rd = inst.c0r_format.rd;
	u32 sel = inst.c0r_format.sel;

	if (rd == MIPS_CP0_ERRCTL && sel == 0) {
		/* ErrCtl reads as zero: patch in "add rt, zero, zero". */
		repl.r_format.opcode = spec_op;
		repl.r_format.rd = inst.c0r_format.rt;
		repl.r_format.func = add_op;
	} else {
		/* Otherwise, load the saved register image from the commpage. */
		repl.i_format.opcode = lw_op;
		repl.i_format.rt = inst.c0r_format.rt;
		repl.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
			offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
#ifdef CONFIG_CPU_BIG_ENDIAN
		/* On big-endian with 64-bit regs, lw wants the low half at +4. */
		if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
			repl.i_format.simmediate |= 4;
#endif
	}

	return kvm_mips_trans_replace(vcpu, opc, repl);
}
123*4882a593Smuzhiyun 
int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			struct kvm_vcpu *vcpu)
{
	union mips_instruction repl = { 0 };
	u32 rd = inst.c0r_format.rd;
	u32 sel = inst.c0r_format.sel;

	/* Replace the MTC0 with a store into the commpage register image. */
	repl.i_format.opcode = sw_op;
	repl.i_format.rt = inst.c0r_format.rt;
	repl.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
		offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
#ifdef CONFIG_CPU_BIG_ENDIAN
	/* On big-endian with 64-bit regs, sw must target the low half at +4. */
	if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
		repl.i_format.simmediate |= 4;
#endif

	return kvm_mips_trans_replace(vcpu, opc, repl);
}
144