// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/pte-walk.h>

#include "trace_hv.h"

//#define DEBUG_RESIZE_HPT	1

#ifdef DEBUG_RESIZE_HPT
#define resize_hpt_debug(resize, ...)				\
	do {							\
		printk(KERN_DEBUG "RESIZE HPT %p: ", resize);	\
		printk(__VA_ARGS__);				\
	} while (0)
#else
#define resize_hpt_debug(resize, ...)				\
	do { } while (0)
#endif

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				       long pte_index, unsigned long pteh,
				       unsigned long ptel, unsigned long *pte_idx_ret);
struct kvm_resize_hpt {
	/* These fields read-only after init */
	struct kvm *kvm;
	struct work_struct work;
	u32 order;

	/* These fields protected by kvm->arch.mmu_setup_lock */

	/* Possible values and their usage:
	 *  <0     an error occurred during allocation,
	 *  -EBUSY allocation is in progress,
	 *  0      allocation made successfully.
	 */
	int error;

	/* Private to the work thread, until error != -EBUSY,
	 * then protected by kvm->arch.mmu_setup_lock.
	 */
	struct kvm_hpt_info hpt;
};

int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
{
	unsigned long hpt = 0;
	int cma = 0;
	struct page *page = NULL;
	struct revmap_entry *rev;
	unsigned long npte;

	if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER))
		return -EINVAL;

	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
	if (page) {
		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
		memset((void *)hpt, 0, (1ul << order));
		cma = 1;
	}

	if (!hpt)
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL
				       |__GFP_NOWARN, order - PAGE_SHIFT);

	if (!hpt)
		return -ENOMEM;

	/* HPTEs are 2**4 bytes long */
	npte = 1ul << (order - 4);

	/* Allocate reverse map array */
	rev = vmalloc(array_size(npte, sizeof(struct revmap_entry)));
	if (!rev) {
		if (cma)
			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
		else
			free_pages(hpt, order - PAGE_SHIFT);
		return -ENOMEM;
	}

	info->order = order;
	info->virt = hpt;
	info->cma = cma;
	info->rev = rev;

	return 0;
}

void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
{
	atomic64_set(&kvm->arch.mmio_update, 0);
	kvm->arch.hpt = *info;
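	/*
	 * The HTABSIZE field of SDR1 encodes log2(HPT size) - 18,
	 * since the minimum architected HPT size is 2^18 = 256kB.
	 */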
	kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);

	pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n",
		 info->virt, (long)info->order, kvm->arch.lpid);
}

long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
{
	long err = -EBUSY;
	struct kvm_hpt_info info;

	mutex_lock(&kvm->arch.mmu_setup_lock);
	if (kvm->arch.mmu_ready) {
		kvm->arch.mmu_ready = 0;
		/* order mmu_ready vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.mmu_ready = 1;
			goto out;
		}
	}
	if (kvm_is_radix(kvm)) {
		err = kvmppc_switch_mmu_to_hpt(kvm);
		if (err)
			goto out;
	}

	if (kvm->arch.hpt.order == order) {
		/* We already have a suitable HPT */

		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
		/*
		 * Reset all the reverse-mapping chains for all memslots
		 */
		kvmppc_rmap_reset(kvm);
		err = 0;
		goto out;
	}

	if (kvm->arch.hpt.virt) {
		kvmppc_free_hpt(&kvm->arch.hpt);
		kvmppc_rmap_reset(kvm);
	}

	err = kvmppc_allocate_hpt(&info, order);
	if (err < 0)
		goto out;
	kvmppc_set_hpt(kvm, &info);

out:
	if (err == 0)
		/* Ensure that each vcpu will flush its TLB on next entry. */
		cpumask_setall(&kvm->arch.need_tlb_flush);

	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return err;
}

void kvmppc_free_hpt(struct kvm_hpt_info *info)
{
	vfree(info->rev);
	info->rev = NULL;
	if (info->cma)
		kvm_free_hpt_cma(virt_to_page(info->virt),
				 1 << (info->order - PAGE_SHIFT));
	else if (info->virt)
		free_pages(info->virt, info->order - PAGE_SHIFT);
	info->virt = 0;
	info->order = 0;
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
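/*
 * For HPTE_V_LARGE entries, low-order RPN bits of the second dword
 * select the actual page size: here 0x1000 encodes 64k, 0 encodes 16M.
 */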
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}

void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	unsigned long idx_ret;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
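	/* kvmppc_hpt_mask() is the number of HPTEGs in the HPT minus 1
	 * (each 128-byte HPTEG holds 8 HPTEs) */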
	if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
		npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25)))
			& kvmppc_hpt_mask(&kvm->arch.hpt);
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
						 &idx_ret);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
		return -EINVAL;

	host_lpid = 0;
	if (cpu_has_feature(CPU_FTR_HVMODE))
		host_lpid = mfspr(SPRN_LPID);

	/* POWER8 and above have 12-bit LPIDs (10-bit in POWER7) */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		rsvd_lpid = LPID_RSVD;
	else
		rsvd_lpid = LPID_RSVD_POWER7;

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				       long pte_index, unsigned long pteh,
				       unsigned long ptel, unsigned long *pte_idx_ret)
{
	long ret;

	preempt_disable();
	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
				kvm->mm->pgd, false, pte_idx_ret);
	preempt_enable();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;

}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
					      unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = kvmppc_actual_pgsz(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
					 struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, orig_v, gr;
	__be64 *hptep;
	long int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	if (kvm_is_radix(vcpu->kvm))
		return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	preempt_disable();
	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0) {
		preempt_enable();
		return -ENOENT;
	}
	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
	v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
	gr = kvm->arch.hpt.rev[index].guest_rpte;

	unlock_hpte(hptep, orig_v);
	preempt_enable();

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

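	/*
	 * For D-form loads/stores (primary opcodes 32-55), bit 0x10000000
	 * of the instruction is generally set for stores and clear for
	 * loads; for X-form instructions (primary opcode 31), the
	 * corresponding extended-opcode bit is 0x100 instead.
	 */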
	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			   unsigned long gpa, gva_t ea, int is_store)
{
	u32 last_inst;

	/*
	 * Fast path - check if the guest physical address corresponds to a
	 * device on the FAST_MMIO_BUS; if so, we can avoid loading the
	 * instruction altogether and just handle it here and return.
	 */
	if (is_store) {
		int idx, ret;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0,
				       NULL);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret) {
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			return RESUME_GUEST;
		}
	}

	/*
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
	    EMULATE_DONE)
		return RESUME_GUEST;

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long hpte[3], r;
	unsigned long hnow_v, hnow_r;
	__be64 *hptep;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa_base, gfn_base;
	unsigned long gpa, gfn, hva, pfn, hpa;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page;
	long index, ret;
	bool is_ci;
	bool writing, write_ok;
	unsigned int shift;
	unsigned long rcbits;
	long mmio_update;
	pte_t pte, *ptep;

	if (kvm_is_radix(kvm))
		return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr);

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;

	if (vcpu->arch.pgfault_cache) {
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
			r = vcpu->arch.pgfault_cache->rpte;
			psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0],
						   r);
			gpa_base = r & HPTE_R_RPN & ~(psize - 1);
			gfn_base = gpa_base >> PAGE_SHIFT;
			gpa = gpa_base | (ea & (psize - 1));
			return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
						      dsisr & DSISR_ISSTORE);
		}
	}
	index = vcpu->arch.pgfault_index;
	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
	rev = &kvm->arch.hpt.rev[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	hpte[1] = be64_to_cpu(hptep[1]);
	hpte[2] = r = rev->guest_rpte;
	unlock_hpte(hptep, hpte[0]);
	preempt_enable();

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]);
		hpte[1] = hpte_new_to_old_r(hpte[1]);
	}
	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = kvmppc_actual_pgsz(hpte[0], r);
	gpa_base = r & HPTE_R_RPN & ~(psize - 1);
	gfn_base = gpa_base >> PAGE_SHIFT;
	gpa = gpa_base | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	/*
	 * This should never happen, because of the slot_is_aligned()
	 * check in kvmppc_do_h_enter().
	 */
	if (gfn_base < memslot->base_gfn)
		return -EFAULT;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
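	/*
	 * mmu_notifier_seq is sampled here, before the translation below;
	 * mmu_notifier_retry() then rechecks it once we hold the HPTE and
	 * rmap locks, so a racing MMU notifier invalidation makes us drop
	 * the locks and let the guest retry.
	 */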

	ret = -EFAULT;
	page = NULL;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
		write_ok = true;
	} else {
		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, &write_ok);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
	pte = __pte(0);
	if (ptep)
		pte = READ_ONCE(*ptep);
	spin_unlock(&kvm->mmu_lock);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!pte_present(pte)) {
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}
	hpa = pte_pfn(pte) << PAGE_SHIFT;
	pte_size = PAGE_SIZE;
	if (shift)
		pte_size = 1ul << shift;
	is_ci = pte_ci(pte);

	if (psize > pte_size)
		goto out_put;
	if (pte_size > psize)
		hpa |= hva & (pte_size - psize);

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_ci)) {
		if (is_ci)
			goto out_put;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/*
	 * Set the HPTE to point to hpa.
	 * Since the hpa is at PAGE_SIZE granularity, make sure we
	 * don't mask out lower-order bits if psize < PAGE_SIZE.
	 */
	if (psize < PAGE_SIZE)
		psize = PAGE_SIZE;
	r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa;
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hnow_v = be64_to_cpu(hptep[0]);
	hnow_r = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
		hnow_r = hpte_new_to_old_r(hnow_r);
	}

	/*
	 * If the HPT is being resized, don't update the HPTE,
	 * instead let the guest retry after the resize operation is complete.
	 * The synchronization for mmu_ready test vs. set is provided
	 * by the HPTE lock.
	 */
	if (!kvm->arch.mmu_ready)
		goto out_unlock;

	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	/* Always put the HPTE in the rmap chain for the page base address */
	rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		r = hpte_old_to_new_r(hpte[0], r);
		hpte[0] = hpte_old_to_new_v(hpte[0]);
	}
	hptep[1] = cpu_to_be64(r);
	eieio();
	__unlock_hpte(hptep, hpte[0]);
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		set_page_dirty_lock(page);

out_put:
	trace_kvm_page_fault_exit(vcpu, hpte, ret);

	if (page)
		put_page(page);
	return ret;

out_unlock:
	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	preempt_enable();
	goto out_put;
}

void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		/* Mutual exclusion with kvm_unmap_hva_range etc. */
		spin_lock(&kvm->mmu_lock);
		/*
		 * This assumes it is acceptable to lose reference and
		 * change bits across a reset.
		 */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
		spin_unlock(&kvm->mmu_lock);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      unsigned long gfn);

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				hva_handler_fn handler)
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			ret = handler(kvm, memslot, gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  hva_handler_fn handler)
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}

/* Must be called with both HPTE and rmap locked */
static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
			      struct kvm_memory_slot *memslot,
			      unsigned long *rmapp, unsigned long gfn)
{
	__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long j, h;
	unsigned long ptel, psize, rcbits;

	j = rev[i].forw;
	if (j == i) {
		/* chain is now empty */
		*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
	} else {
		/* remove i from chain */
		h = rev[i].back;
		rev[h].forw = j;
		rev[j].back = h;
		rev[i].forw = rev[i].back = i;
		*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
	}

	/* Now check and modify the HPTE */
	ptel = rev[i].guest_rpte;
	psize = kvmppc_actual_pgsz(be64_to_cpu(hptep[0]), ptel);
	if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
	    hpte_rpn(ptel, psize) == gfn) {
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, i);
		hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
		/* Harvest R and C */
		rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
		*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
		if ((rcbits & HPTE_R_C) && memslot->dirty_bitmap)
			kvmppc_update_dirty_map(memslot, gfn, psize);
		if (rcbits & ~rev[i].guest_rpte) {
			rev[i].guest_rpte = ptel | rcbits;
			note_hpte_modification(kvm, &rev[i]);
		}
	}
}

static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			   unsigned long gfn)
{
	unsigned long i;
	__be64 *hptep;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}

		kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn);
		unlock_rmap(rmapp);
		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	}
	return 0;
}

int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
	kvm_handle_hva_range(kvm, start, end, handler);
	return 0;
}

void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
				  struct kvm_memory_slot *memslot)
{
	unsigned long gfn;
	unsigned long n;
	unsigned long *rmapp;

	gfn = memslot->base_gfn;
	rmapp = memslot->arch.rmap;
	if (kvm_is_radix(kvm)) {
		kvmppc_radix_flush_memslot(kvm, memslot);
		return;
	}

	for (n = memslot->npages; n; --n, ++gfn) {
		/*
		 * Testing the present bit without locking is OK because
		 * the memslot has been marked invalid already, and hence
		 * no new HPTEs referencing this page can be created,
		 * thus the present bit can't go from 0 to 1.
		 */
		if (*rmapp & KVMPPC_RMAP_PRESENT)
			kvm_unmap_rmapp(kvm, memslot, gfn);
		++rmapp;
	}
}

static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	__be64 *hptep;
	int ret = 0;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
		    (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			if (!(rev[i].guest_rpte & HPTE_R_R)) {
				rev[i].guest_rpte |= HPTE_R_R;
				note_hpte_modification(kvm, &rev[i]);
			}
			ret = 1;
		}
		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_age_radix : kvm_age_rmapp;
	return kvm_handle_hva_range(kvm, start, end, handler);
}

static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
			j = rev[i].forw;
			if (be64_to_cpu(hp[1]) & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp;
	return kvm_handle_hva(kvm, hva, handler);
}

void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
	kvm_handle_hva(kvm, hva, handler);
}

static int vcpus_running(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.vcpus_running) != 0;
}

/*
 * Returns the number of system pages that are dirty.
 * This can be more than 1 if we find a huge-page HPTE.
 */
static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	unsigned long n;
	unsigned long v, r;
	__be64 *hptep;
	int npages_dirty = 0;

retry:
	lock_rmap(rmapp);
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return npages_dirty;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		unsigned long hptep1;
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		j = rev[i].forw;

		/*
		 * Checking the C (changed) bit here is racy since there
		 * is no guarantee about when the hardware writes it back.
		 * If the HPTE is not writable then it is stable since the
		 * page can't be written to, and we would have done a tlbie
		 * (which forces the hardware to complete any writeback)
		 * when making the HPTE read-only.
		 * If vcpus are running then this call is racy anyway
		 * since the page could get dirtied subsequently, so we
		 * expect there to be a further call which would pick up
		 * any delayed C bit writeback.
		 * Otherwise we need to do the tlbie even if C==0 in
		 * order to pick up any delayed writeback of C.
		 */
		hptep1 = be64_to_cpu(hptep[1]);
		if (!(hptep1 & HPTE_R_C) &&
		    (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
			__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
			continue;
		}

		/* need to make it temporarily absent so C is stable */
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, i);
		v = be64_to_cpu(hptep[0]);
		r = be64_to_cpu(hptep[1]);
		if (r & HPTE_R_C) {
			hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
			if (!(rev[i].guest_rpte & HPTE_R_C)) {
				rev[i].guest_rpte |= HPTE_R_C;
				note_hpte_modification(kvm, &rev[i]);
			}
			n = kvmppc_actual_pgsz(v, r);
			n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
			if (n > npages_dirty)
				npages_dirty = n;
			eieio();
		}
		v &= ~HPTE_V_ABSENT;
		v |= HPTE_V_VALID;
		__unlock_hpte(hptep, v);
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return npages_dirty;
}

void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			      struct kvm_memory_slot *memslot,
			      unsigned long *map)
{
	unsigned long gfn;

	if (!vpa->dirty || !vpa->pinned_addr)
		return;
	gfn = vpa->gpa >> PAGE_SHIFT;
	if (gfn < memslot->base_gfn ||
	    gfn >= memslot->base_gfn + memslot->npages)
		return;

	vpa->dirty = false;
	if (map)
		__set_bit_le(gfn - memslot->base_gfn, map);
}

long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
				 struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i;
	unsigned long *rmapp;

	preempt_disable();
	rmapp = memslot->arch.rmap;
	for (i = 0; i < memslot->npages; ++i) {
		int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since we always put huge-page HPTEs in the rmap chain
		 * corresponding to their page base address.
		 */
		if (npages)
			set_dirty_bits(map, i, npages);
		++rmapp;
	}
	preempt_enable();
	return 0;
}

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, offset;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
	if (npages < 1)
		goto err;
	page = pages[0];
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	offset = gpa & (PAGE_SIZE - 1);
	if (nb_ret)
		*nb_ret = PAGE_SIZE - offset;
	return page_address(page) + offset;

err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
			     bool dirty)
{
	struct page *page = virt_to_page(va);
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	int srcu_idx;

	put_page(page);

	if (!dirty)
		return;

	/* We need to mark this page dirty in the memslot dirty_bitmap, if any */
	gfn = gpa >> PAGE_SHIFT;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap)
		set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}
1210*4882a593Smuzhiyun
1211*4882a593Smuzhiyun /*
1212*4882a593Smuzhiyun * HPT resizing
1213*4882a593Smuzhiyun */
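/*
 * Outline (summarising the code below): userspace first calls the
 * prepare ioctl, which schedules allocation of the new HPT in a work
 * thread; resize->error stays -EBUSY until that finishes.  The commit
 * ioctl then stops the vcpus (mmu_ready = 0), rehashes every old
 * entry into the new table, and pivots the two tables under
 * kvm->mmu_lock.
 */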
static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
{
	int rc;

	rc = kvmppc_allocate_hpt(&resize->hpt, resize->order);
	if (rc < 0)
		return rc;

	resize_hpt_debug(resize, "resize_hpt_allocate(): HPT @ 0x%lx\n",
			 resize->hpt.virt);

	return 0;
}

static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
					    unsigned long idx)
{
	struct kvm *kvm = resize->kvm;
	struct kvm_hpt_info *old = &kvm->arch.hpt;
	struct kvm_hpt_info *new = &resize->hpt;
	unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1;
	unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1;
	__be64 *hptep, *new_hptep;
	unsigned long vpte, rpte, guest_rpte;
	int ret;
	struct revmap_entry *rev;
	unsigned long apsize, avpn, pteg, hash;
	unsigned long new_idx, new_pteg, replace_vpte;
	int pshift;

	hptep = (__be64 *)(old->virt + (idx << 4));

	/* Guest is stopped, so new HPTEs can't be added or faulted
	 * in, only unmapped or altered by host actions. So, it's
	 * safe to check this before we take the HPTE lock */
	vpte = be64_to_cpu(hptep[0]);
	if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
		return 0; /* nothing to do */

	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();

	vpte = be64_to_cpu(hptep[0]);

	ret = 0;
	if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
		/* Nothing to do */
		goto out;

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		rpte = be64_to_cpu(hptep[1]);
		vpte = hpte_new_to_old_v(vpte, rpte);
	}

	/* Unmap */
	rev = &old->rev[idx];
	guest_rpte = rev->guest_rpte;

	ret = -EIO;
	apsize = kvmppc_actual_pgsz(vpte, guest_rpte);
	if (!apsize)
		goto out;

	if (vpte & HPTE_V_VALID) {
		unsigned long gfn = hpte_rpn(guest_rpte, apsize);
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		struct kvm_memory_slot *memslot =
			__gfn_to_memslot(kvm_memslots(kvm), gfn);

		if (memslot) {
			unsigned long *rmapp;
			rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];

			lock_rmap(rmapp);
			kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn);
			unlock_rmap(rmapp);
		}

		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}

	/* Reload PTE after unmap */
	vpte = be64_to_cpu(hptep[0]);
	BUG_ON(vpte & HPTE_V_VALID);
	BUG_ON(!(vpte & HPTE_V_ABSENT));

	ret = 0;
	if (!(vpte & HPTE_V_BOLTED))
		goto out;

	rpte = be64_to_cpu(hptep[1]);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		vpte = hpte_new_to_old_v(vpte, rpte);
		rpte = hpte_new_to_old_r(rpte);
	}

	pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
	avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
	pteg = idx / HPTES_PER_GROUP;
	if (vpte & HPTE_V_SECONDARY)
		pteg = ~pteg;

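	/*
	 * Reconstruct the full hash so the entry can be re-placed in the
	 * new table.  The AVPN field only keeps offset bits 23 and above,
	 * so for base page shifts below 23 the missing low hash bits are
	 * recovered from the old PTEG index (complemented above for
	 * secondary entries), using old_pteg = hash & old_hash_mask.
	 */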
	if (!(vpte & HPTE_V_1TB_SEG)) {
		unsigned long offset, vsid;

		/* We only have 28 - 23 bits of offset in avpn */
		offset = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (pshift < 23)
			offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;

		hash = vsid ^ (offset >> pshift);
	} else {
		unsigned long offset, vsid;

		/* We only have 40 - 23 bits of seg_off in avpn */
		offset = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (pshift < 23)
			offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;

		hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
	}

	new_pteg = hash & new_hash_mask;
	if (vpte & HPTE_V_SECONDARY)
		new_pteg = ~hash & new_hash_mask;

	new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
	new_hptep = (__be64 *)(new->virt + (new_idx << 4));

	replace_vpte = be64_to_cpu(new_hptep[0]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		unsigned long replace_rpte = be64_to_cpu(new_hptep[1]);
		replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte);
	}

	if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		BUG_ON(new->order >= old->order);

		if (replace_vpte & HPTE_V_BOLTED) {
			if (vpte & HPTE_V_BOLTED)
				/* Bolted collision, nothing we can do */
				ret = -ENOSPC;
			/* Discard the new HPTE */
			goto out;
		}

		/* Discard the previous HPTE */
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		rpte = hpte_old_to_new_r(vpte, rpte);
		vpte = hpte_old_to_new_v(vpte);
	}

	new_hptep[1] = cpu_to_be64(rpte);
	new->rev[new_idx].guest_rpte = guest_rpte;
	/* No need for a barrier, since new HPT isn't active */
	new_hptep[0] = cpu_to_be64(vpte);
	unlock_hpte(new_hptep, vpte);

out:
	unlock_hpte(hptep, vpte);
	return ret;
}

static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
{
	struct kvm *kvm = resize->kvm;
	unsigned long i;
	int rc;

	for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
		rc = resize_hpt_rehash_hpte(resize, i);
		if (rc != 0)
			return rc;
	}

	return 0;
}

static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
{
	struct kvm *kvm = resize->kvm;
	struct kvm_hpt_info hpt_tmp;

	/* Exchange the pending tables in the resize structure with
	 * the active tables */

	resize_hpt_debug(resize, "resize_hpt_pivot()\n");

	spin_lock(&kvm->mmu_lock);
	asm volatile("ptesync" : : : "memory");

	hpt_tmp = kvm->arch.hpt;
	kvmppc_set_hpt(kvm, &resize->hpt);
	resize->hpt = hpt_tmp;

	spin_unlock(&kvm->mmu_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		kvmppc_setup_partition_table(kvm);

	resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
}

static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
{
	if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
		return;

	if (!resize)
		return;

	if (resize->error != -EBUSY) {
		if (resize->hpt.virt)
			kvmppc_free_hpt(&resize->hpt);
		kfree(resize);
	}

	if (kvm->arch.resize_hpt == resize)
		kvm->arch.resize_hpt = NULL;
}

static void resize_hpt_prepare_work(struct work_struct *work)
{
	struct kvm_resize_hpt *resize = container_of(work,
						     struct kvm_resize_hpt,
						     work);
	struct kvm *kvm = resize->kvm;
	int err = 0;

	if (WARN_ON(resize->error != -EBUSY))
		return;

	mutex_lock(&kvm->arch.mmu_setup_lock);

	/* Request is still current? */
	if (kvm->arch.resize_hpt == resize) {
		/*
		 * We may request large allocations here, which can sleep
		 * for a while, so don't hold kvm->arch.mmu_setup_lock
		 * across them.
		 */
		mutex_unlock(&kvm->arch.mmu_setup_lock);

		resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
				 resize->order);

		err = resize_hpt_allocate(resize);

		/*
		 * -EBUSY is reserved to mean "allocation still in
		 * progress", so the allocation path must never return it.
		 */
		if (WARN_ON(err == -EBUSY))
			err = -EINPROGRESS;

		mutex_lock(&kvm->arch.mmu_setup_lock);
		/* It is possible that kvm->arch.resize_hpt != resize
		 * after we grab kvm->arch.mmu_setup_lock again.
		 */
	}

	resize->error = err;

	if (kvm->arch.resize_hpt != resize)
		resize_hpt_release(kvm, resize);

	mutex_unlock(&kvm->arch.mmu_setup_lock);
}

long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
				     struct kvm_ppc_resize_hpt *rhpt)
{
	unsigned long flags = rhpt->flags;
	unsigned long shift = rhpt->shift;
	struct kvm_resize_hpt *resize;
	int ret;

	if (flags != 0 || kvm_is_radix(kvm))
		return -EINVAL;

	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;

	mutex_lock(&kvm->arch.mmu_setup_lock);

	resize = kvm->arch.resize_hpt;

	if (resize) {
		if (resize->order == shift) {
			/* Suitable resize in progress? */
			ret = resize->error;
			if (ret == -EBUSY)
				ret = 100; /* estimated time in ms */
			else if (ret)
				resize_hpt_release(kvm, resize);

			goto out;
		}

		/* not suitable, cancel it */
		resize_hpt_release(kvm, resize);
	}

	ret = 0;
	if (!shift)
		goto out; /* nothing to do */

	/* start new resize */

	resize = kzalloc(sizeof(*resize), GFP_KERNEL);
	if (!resize) {
		ret = -ENOMEM;
		goto out;
	}

	resize->error = -EBUSY;
	resize->order = shift;
	resize->kvm = kvm;
	INIT_WORK(&resize->work, resize_hpt_prepare_work);
	kvm->arch.resize_hpt = resize;

	schedule_work(&resize->work);

	ret = 100; /* estimated time in ms */

out:
	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return ret;
}

static void resize_hpt_boot_vcpu(void *opaque)
{
	/* Nothing to do, just force a KVM exit */
}

long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
				    struct kvm_ppc_resize_hpt *rhpt)
{
	unsigned long flags = rhpt->flags;
	unsigned long shift = rhpt->shift;
	struct kvm_resize_hpt *resize;
	long ret;

	if (flags != 0 || kvm_is_radix(kvm))
		return -EINVAL;

	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;

	mutex_lock(&kvm->arch.mmu_setup_lock);

	resize = kvm->arch.resize_hpt;

	/* This shouldn't be possible */
	ret = -EIO;
	if (WARN_ON(!kvm->arch.mmu_ready))
		goto out_no_hpt;

	/* Stop VCPUs from running while we mess with the HPT */
	kvm->arch.mmu_ready = 0;
	smp_mb();

	/* Boot all CPUs out of the guest so they re-read
	 * mmu_ready */
	on_each_cpu(resize_hpt_boot_vcpu, NULL, 1);

	ret = -ENXIO;
	if (!resize || (resize->order != shift))
		goto out;

	ret = resize->error;
	if (ret)
		goto out;

	ret = resize_hpt_rehash(resize);
	if (ret)
		goto out;

	resize_hpt_pivot(resize);

out:
	/* Let VCPUs run again */
	kvm->arch.mmu_ready = 1;
	smp_mb();
out_no_hpt:
	resize_hpt_release(kvm, resize);
	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return ret;
}

/*
 * Functions for reading and writing the hash table via reads and
 * writes on a file descriptor.
 *
 * Reads return the guest view of the hash table, which has to be
 * pieced together from the real hash table and the guest_rpte
 * values in the revmap array.
 *
 * On writes, each HPTE written is considered in turn, and if it
 * is valid, it is written to the HPT as if an H_ENTER with the
 * exact flag set was done.  When the invalid count in the header
 * written to the stream is non-zero, the kernel ensures that many
 * HPTEs are invalid, and invalidates them if not.
 */
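
/*
 * Illustrative userspace flow (a sketch, not compiled here; it assumes
 * a VM fd in "vmfd" and uses struct kvm_get_htab_fd / struct
 * kvm_get_htab_header from <linux/kvm.h>):
 *
 *	struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
 *	int fd = ioctl(vmfd, KVM_PPC_GET_HTAB_FD, &ghf);
 *
 * Each read() on fd then yields records consisting of a
 * struct kvm_get_htab_header followed by hdr.n_valid HPTEs of
 * HPTE_SIZE bytes each, with hdr.n_invalid further entries to be
 * treated as invalid; reads after the first pass return only entries
 * dirtied since they were last read.
 */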

struct kvm_htab_ctx {
	unsigned long index;
	unsigned long flags;
	struct kvm *kvm;
	int first_pass;
};

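/* Each HPTE travels through the fd as two 64-bit words */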
#define HPTE_SIZE	(2 * sizeof(unsigned long))

/*
 * Returns 1 if this HPT entry has been modified or has pending
 * R/C bit changes.
 */
static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
{
	unsigned long rcbits_unset;

	if (revp->guest_rpte & HPTE_GR_MODIFIED)
		return 1;

	/* Also need to consider changes in reference and changed bits */
	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
	if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
	    (be64_to_cpu(hptp[1]) & rcbits_unset))
		return 1;

	return 0;
}

static long record_hpte(unsigned long flags, __be64 *hptp,
			unsigned long *hpte, struct revmap_entry *revp,
			int want_valid, int first_pass)
{
	unsigned long v, r, hr;
	unsigned long rcbits_unset;
	int ok = 1;
	int valid, dirty;

	/* Unmodified entries are uninteresting except on the first pass */
	dirty = hpte_dirty(revp, hptp);
	if (!first_pass && !dirty)
		return 0;

	valid = 0;
	if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		valid = 1;
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
		    !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
			valid = 0;
	}
	if (valid != want_valid)
		return 0;

	v = r = 0;
	if (valid || dirty) {
		/* lock the HPTE so it's stable and read it */
		preempt_disable();
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = be64_to_cpu(hptp[0]);
		hr = be64_to_cpu(hptp[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, hr);
			hr = hpte_new_to_old_r(hr);
		}

		/* re-evaluate valid and dirty from synchronized HPTE value */
		valid = !!(v & HPTE_V_VALID);
		dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);

		/* Harvest R and C into guest view if necessary */
		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
		if (valid && (rcbits_unset & hr)) {
			revp->guest_rpte |= (hr &
				(HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
			dirty = 1;
		}

		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
			valid = 1;
		}
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
			valid = 0;

		r = revp->guest_rpte;
		/* only clear modified if this is the right sort of entry */
		if (valid == want_valid && dirty) {
			r &= ~HPTE_GR_MODIFIED;
			revp->guest_rpte = r;
		}
		unlock_hpte(hptp, be64_to_cpu(hptp[0]));
		preempt_enable();
		if (!(valid == want_valid && (first_pass || dirty)))
			ok = 0;
	}
	hpte[0] = cpu_to_be64(v);
	hpte[1] = cpu_to_be64(r);
	return ok;
}

static ssize_t kvm_htab_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	__be64 *hptp;
	struct revmap_entry *revp;
	unsigned long i, nb, nw;
	unsigned long __user *lbuf;
	struct kvm_get_htab_header __user *hptr;
	unsigned long flags;
	int first_pass;
	unsigned long hpte[2];

	if (!access_ok(buf, count))
		return -EFAULT;
	if (kvm_is_radix(kvm))
		return 0;

	first_pass = ctx->first_pass;
	flags = ctx->flags;

	i = ctx->index;
	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
	revp = kvm->arch.hpt.rev + i;
	lbuf = (unsigned long __user *)buf;

	nb = 0;
	while (nb + sizeof(hdr) + HPTE_SIZE < count) {
		/* Initialize header */
		hptr = (struct kvm_get_htab_header __user *)buf;
		hdr.n_valid = 0;
		hdr.n_invalid = 0;
		nw = nb;
		nb += sizeof(hdr);
		lbuf = (unsigned long __user *)(buf + sizeof(hdr));

		/* Skip uninteresting entries, i.e. entries that are
		 * clean on any pass after the first */
		if (!first_pass) {
			while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
			       !hpte_dirty(revp, hptp)) {
				++i;
				hptp += 2;
				++revp;
			}
		}
		hdr.index = i;

		/* Grab a series of valid entries */
		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
		       hdr.n_valid < 0xffff &&
		       nb + HPTE_SIZE < count &&
		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
			/* valid entry, write it out */
			++hdr.n_valid;
			if (__put_user(hpte[0], lbuf) ||
			    __put_user(hpte[1], lbuf + 1))
				return -EFAULT;
			nb += HPTE_SIZE;
			lbuf += 2;
			++i;
			hptp += 2;
			++revp;
		}
		/* Now skip invalid entries while we can */
		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
		       hdr.n_invalid < 0xffff &&
		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
			/* found an invalid entry */
			++hdr.n_invalid;
			++i;
			hptp += 2;
			++revp;
		}

		if (hdr.n_valid || hdr.n_invalid) {
			/* write back the header */
			if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
				return -EFAULT;
			nw = nb;
			buf = (char __user *)lbuf;
		} else {
			nb = nw;
		}

		/* Check if we've wrapped around the hash table */
		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
			i = 0;
			ctx->first_pass = 0;
			break;
		}
	}

	ctx->index = i;

	return nb;
}

static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long i, j;
	unsigned long v, r;
	unsigned long __user *lbuf;
	__be64 *hptp;
	unsigned long tmp[2];
	ssize_t nb;
	long int err, ret;
	int mmu_ready;
	int pshift;

	if (!access_ok(buf, count))
		return -EFAULT;
	if (kvm_is_radix(kvm))
		return -EINVAL;

	/* lock out vcpus from running while we're doing this */
	mutex_lock(&kvm->arch.mmu_setup_lock);
	mmu_ready = kvm->arch.mmu_ready;
	if (mmu_ready) {
		kvm->arch.mmu_ready = 0;	/* temporarily */
		/* order mmu_ready vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.mmu_ready = 1;
			mutex_unlock(&kvm->arch.mmu_setup_lock);
			return -EBUSY;
		}
	}

	err = 0;
	for (nb = 0; nb + sizeof(hdr) <= count; ) {
		err = -EFAULT;
		if (__copy_from_user(&hdr, buf, sizeof(hdr)))
			break;

		err = 0;
		if (nb + hdr.n_valid * HPTE_SIZE > count)
			break;

		nb += sizeof(hdr);
		buf += sizeof(hdr);

		err = -EINVAL;
		i = hdr.index;
		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) ||
		    i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt))
			break;

		hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
		lbuf = (unsigned long __user *)buf;
		for (j = 0; j < hdr.n_valid; ++j) {
			__be64 hpte_v;
			__be64 hpte_r;

			err = -EFAULT;
			if (__get_user(hpte_v, lbuf) ||
			    __get_user(hpte_r, lbuf + 1))
				goto out;
			v = be64_to_cpu(hpte_v);
			r = be64_to_cpu(hpte_r);
			err = -EINVAL;
			if (!(v & HPTE_V_VALID))
				goto out;
			pshift = kvmppc_hpte_base_page_shift(v, r);
			if (pshift <= 0)
				goto out;
			lbuf += 2;
			nb += HPTE_SIZE;

			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			err = -EIO;
			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
							 tmp);
			if (ret != H_SUCCESS) {
				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
				       "r=%lx\n", ret, i, v, r);
				goto out;
			}
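			/*
			 * A VRMA entry reveals the page size of the guest's
			 * real-mode area, so recompute vrma_slb_v and the
			 * partition table (or LPCR[VRMASD] before POWER9)
			 * to match before declaring the MMU ready.
			 */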
			if (!mmu_ready && is_vrma_hpte(v)) {
				unsigned long senc, lpcr;

				senc = slb_pgsize_encoding(1ul << pshift);
				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
					(VRMA_VSID << SLB_VSID_SHIFT_1T);
				if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
					lpcr = senc << (LPCR_VRMASD_SH - 4);
					kvmppc_update_lpcr(kvm, lpcr,
							   LPCR_VRMASD);
				} else {
					kvmppc_setup_partition_table(kvm);
				}
				mmu_ready = 1;
			}
			++i;
			hptp += 2;
		}

		for (j = 0; j < hdr.n_invalid; ++j) {
			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			++i;
			hptp += 2;
		}
		err = 0;
	}

out:
	/* Order HPTE updates vs. mmu_ready */
	smp_wmb();
	kvm->arch.mmu_ready = mmu_ready;
	mutex_unlock(&kvm->arch.mmu_setup_lock);

	if (err)
		return err;
	return nb;
}

static int kvm_htab_release(struct inode *inode, struct file *filp)
{
	struct kvm_htab_ctx *ctx = filp->private_data;

	filp->private_data = NULL;
	if (!(ctx->flags & KVM_GET_HTAB_WRITE))
		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
	kvm_put_kvm(ctx->kvm);
	kfree(ctx);
	return 0;
}

static const struct file_operations kvm_htab_fops = {
	.read = kvm_htab_read,
	.write = kvm_htab_write,
	.llseek = default_llseek,
	.release = kvm_htab_release,
};

int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
{
	int ret;
	struct kvm_htab_ctx *ctx;
	int rwflag;

	/* reject flags we don't recognize */
	if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
		return -EINVAL;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	kvm_get_kvm(kvm);
	ctx->kvm = kvm;
	ctx->index = ghf->start_index;
	ctx->flags = ghf->flags;
	ctx->first_pass = 1;

	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
	if (ret < 0) {
		kfree(ctx);
		kvm_put_kvm_no_destroy(kvm);
		return ret;
	}

	if (rwflag == O_RDONLY) {
		mutex_lock(&kvm->slots_lock);
		atomic_inc(&kvm->arch.hpte_mod_interest);
		/* make sure kvmppc_do_h_enter etc. see the increment */
		synchronize_srcu_expedited(&kvm->srcu);
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}

struct debugfs_htab_state {
	struct kvm *kvm;
	struct mutex mutex;
	unsigned long hpt_index;
	int chars_left;
	int buf_index;
	char buf[64];
};

static int debugfs_htab_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_htab_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}

static int debugfs_htab_release(struct inode *inode, struct file *file)
{
	struct debugfs_htab_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}

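/*
 * Each line of the debugfs "htab" file describes one installed entry:
 * the index, HPTE dword 0, HPTE dword 1, and the guest view kept in
 * the revmap, all in hex.
 */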
static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct debugfs_htab_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long i, n;
	unsigned long v, hr, gr;
	struct kvm *kvm;
	__be64 *hptp;

	kvm = p->kvm;
	if (kvm_is_radix(kvm))
		return 0;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}

	i = p->hpt_index;
	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
	for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt);
	     ++i, hptp += 2) {
		if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
			continue;

		/* lock the HPTE so it's stable and read it */
		preempt_disable();
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
		hr = be64_to_cpu(hptp[1]);
		gr = kvm->arch.hpt.rev[i].guest_rpte;
		unlock_hpte(hptp, v);
		preempt_enable();

		if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
			continue;

		n = scnprintf(p->buf, sizeof(p->buf),
			      "%6lx %.16lx %.16lx %.16lx\n",
			      i, v, hr, gr);
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			goto out;
		}
	}
	p->hpt_index = i;

out:
	mutex_unlock(&p->mutex);
	return ret;
}

static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
				  size_t len, loff_t *ppos)
{
	return -EACCES;
}

static const struct file_operations debugfs_htab_fops = {
	.owner = THIS_MODULE,
	.open = debugfs_htab_open,
	.release = debugfs_htab_release,
	.read = debugfs_htab_read,
	.write = debugfs_htab_write,
	.llseek = generic_file_llseek,
};

void kvmppc_mmu_debugfs_init(struct kvm *kvm)
{
	debugfs_create_file("htab", 0400, kvm->arch.debugfs_dir, kvm,
			    &debugfs_htab_fops);
}

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	vcpu->arch.slb_nr = 32;		/* POWER7/POWER8 */

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}