// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: Migration of pages between normal and secure
 * memory of KVM guests.
 *
 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
 */

/*
 * A pseries guest can be run as a secure guest on Ultravisor-enabled
 * POWER platforms. On such platforms, this driver manages the movement
 * of guest pages between the normal memory managed by the hypervisor (HV)
 * and the secure memory managed by the ultravisor (UV).
 *
 * Page-in and page-out requests from UV reach HV as hcalls, and HV
 * calls back into UV via ultracalls to satisfy these page requests.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available in the platform for running secure guests is hotplugged.
 * Whenever a page belonging to the guest becomes secure, a page from this
 * private device memory is used to represent and track that secure page
 * on the HV side. Some pages (like virtio buffers, VPA pages etc.) are
 * shared between UV and HV. However, such pages aren't represented by
 * device private memory, and mappings to shared memory exist in both
 * UV and HV page tables.
 */
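
/*
 * Illustrative request flow (a sketch based on the routines below, not a
 * verbatim trace): when the guest enters secure mode via UV_ESM, UV asks
 * HV to move each guest page into secure memory, and HV calls back into
 * UV to perform the copy:
 *
 *	guest --UV_ESM--> UV --H_SVM_PAGE_IN(gpa)--> HV
 *	HV: kvmppc_h_svm_page_in() -> kvmppc_svm_page_in() -> uv_page_in()
 */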

/*
 * Notes on locking
 *
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * can either come via UV (guest vCPUs requesting the same page)
 * or when HV and guest simultaneously access the same page.
 * This mutex serializes the migration of a page from HV (normal) to
 * UV (secure) and vice versa. So the serialization points are around
 * migrate_vma routines and page-in/out routines.
 *
 * The per-guest mutex comes with a cost though. Mainly, it serializes
 * the fault path, as page-out can occur when HV faults on accessing a
 * secure guest page. Currently UV issues page-in requests for all the
 * guest PFNs one at a time during early boot (UV_ESM uvcall), so this is
 * not a cause for concern. Also, the number of page-outs caused by HV
 * touching secure pages is currently very low. If and when UV supports
 * overcommitting, we might see concurrent guest-driven page-outs.
 *
 * Locking order
 *
 * 1. kvm->srcu - Protects KVM memslots
 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
 *			     as sync-points for page-in/out
 */
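
/*
 * A sketch of that nesting as it appears in the H_SVM_PAGE_IN path
 * below (illustrative only, error handling elided):
 *
 *	srcu_idx = srcu_read_lock(&kvm->srcu);	// 1. memslots
 *	mmap_read_lock(kvm->mm);		// 2. find_vma, migrate_vma
 *	mutex_lock(&kvm->arch.uvmem_lock);	// 3. page-in/out sync point
 *	...					// kvmppc_svm_page_in()
 *	mutex_unlock(&kvm->arch.uvmem_lock);
 *	mmap_read_unlock(kvm->mm);
 *	srcu_read_unlock(&kvm->srcu, srcu_idx);
 */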

/*
 * Notes on page size
 *
 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
 * and H_SVM_PAGE_OUT hcalls in PAGE_SIZE(64K) granularity. HV tracks
 * secure GPAs at 64K page size and maintains one device PFN for each
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
 * for one 64K page at a time.
 *
 * HV faulting on secure pages: When HV touches any secure page, it
 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
 * UV splits and remaps the 2MB page if necessary and copies out the
 * required 64K page contents.
 *
 * Shared pages: Whenever the guest shares a secure page, UV will split and
 * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
 *
 * HV invalidating a page: When a regular page belonging to a secure
 * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K
 * page size. Using 64K page size is correct here because any non-secure
 * page will essentially be of 64K page size. Splitting by UV during sharing
 * and page-out ensures this.
 *
 * Page fault handling: When HV handles a page fault of a page belonging
 * to a secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
 * Using 64K size is correct here too, as UV would have split the 2MB page
 * into 64K mappings and would have done page-outs earlier.
 *
 * In summary, the current secure pages handling code in HV assumes
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page-sizes, we need to break this assumption.
 */
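
/*
 * A worked example of the granularity above (a sketch, assuming 64K
 * base pages, i.e. PAGE_SHIFT == 16): one 2MB UV mapping spans 32
 * HV-tracked GFNs, each with its own device PFN:
 *
 *	gfn = gpa >> PAGE_SHIFT;	// e.g. gpa 0x20000 -> gfn 2
 *	// 2MB / 64K == 32 GFNs (and device PFNs) per UV 2MB mapping
 */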

#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s_uvmem.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);

/*
 * States of a GFN
 * ---------------
 * The GFN can be in one of the following states.
 *
 * (a) Secure - The GFN is secure. The GFN is associated with
 *	a Secure VM, the contents of the GFN are not accessible
 *	to the Hypervisor. This GFN can be backed by a secure-PFN,
 *	or can be backed by a normal-PFN with contents encrypted.
 *	The former is true when the GFN is paged-in into the
 *	ultravisor. The latter is true when the GFN is paged-out
 *	of the ultravisor.
 *
 * (b) Shared - The GFN is shared. The GFN is associated with a
 *	secure VM. The contents of the GFN are accessible to the
 *	Hypervisor. This GFN is backed by a normal-PFN and its
 *	content is un-encrypted.
 *
 * (c) Normal - The GFN is normal. The GFN is associated with
 *	a normal VM. The contents of the GFN are accessible to
 *	the Hypervisor. Its content is never encrypted.
 *
 * States of a VM
 * --------------
 *
 * Normal VM: A VM whose contents are always accessible to
 *	the hypervisor. All its GFNs are normal-GFNs.
 *
 * Secure VM: A VM whose contents are not accessible to the
 *	hypervisor without the VM's consent. Its GFNs are
 *	either Shared-GFNs or Secure-GFNs.
 *
 * Transient VM: A Normal VM that is transitioning to secure VM.
 *	The transition starts on successful return of
 *	H_SVM_INIT_START, and ends on successful return
 *	of H_SVM_INIT_DONE. This transient VM can have GFNs
 *	in any of the three states, i.e. Secure-GFN, Shared-GFN,
 *	and Normal-GFN. The VM never executes in this state
 *	in supervisor-mode.
 *
 * Memory slot State
 * -----------------
 * The state of a memory slot mirrors the state of the
 * VM the memory slot is associated with.
 *
 * VM State transition
 * -------------------
 *
 * A VM always starts in Normal Mode.
 *
 * H_SVM_INIT_START moves the VM into transient state. During this
 * time the Ultravisor may request some of its GFNs to be shared or
 * secured. So its GFNs can be in one of the three GFN states.
 *
 * H_SVM_INIT_DONE moves the VM entirely from transient state to
 * secure-state. At this point any left-over normal-GFNs are
 * transitioned to Secure-GFN.
 *
 * H_SVM_INIT_ABORT moves the transient VM back to normal VM.
 * All its GFNs are moved to Normal-GFNs.
 *
 * UV_TERMINATE transitions the secure-VM back to normal-VM. All
 * the secure-GFNs and shared-GFNs are transitioned to normal-GFN.
 * Note: The contents of the normal-GFN are undefined at this point.
 *
 * GFN state implementation
 * ------------------------
 *
 * Secure GFN is associated with a secure-PFN; also called uvmem_pfn,
 * when the GFN is paged-in. Its pfn[] has KVMPPC_GFN_UVMEM_PFN flag
 * set, and contains the value of the secure-PFN.
 * It is associated with a normal-PFN; also called mem_pfn, when
 * the GFN is paged-out. Its pfn[] has KVMPPC_GFN_MEM_PFN flag set.
 * The value of the normal-PFN is not tracked.
 *
 * Shared GFN is associated with a normal-PFN. Its pfn[] has
 * KVMPPC_GFN_SHARED flag set. The value of the normal-PFN
 * is not tracked.
 *
 * Normal GFN is associated with a normal-PFN. Its pfn[] has
 * no flag set. The value of the normal-PFN is not tracked.
 *
 * Life cycle of a GFN
 * -------------------
 *
 * --------------------------------------------------------------
 * |        |     Share  |  Unshare | SVM       |H_SVM_INIT_DONE|
 * |        |operation   |operation | abort/    |               |
 * |        |            |          | terminate |               |
 * --------------------------------------------------------------
 * |        |            |          |           |               |
 * | Secure |     Shared | Secure   |Normal     |Secure         |
 * |        |            |          |           |               |
 * | Shared |     Shared | Secure   |Normal     |Shared         |
 * |        |            |          |           |               |
 * | Normal |     Shared | Secure   |Normal     |Secure         |
 * --------------------------------------------------------------
 *
 * Life cycle of a VM
 * ------------------
 *
 * --------------------------------------------------------------------
 * |         |  start    |  H_SVM_  |H_SVM_   |H_SVM_     |UV_SVM_    |
 * |         |  VM       |INIT_START|INIT_DONE|INIT_ABORT |TERMINATE  |
 * |         |           |          |         |           |           |
 * --------------------------------------------------------------------
 * |         |           |          |         |           |           |
 * | Normal  | Normal    | Transient|Error    |Error      |Normal     |
 * |         |           |          |         |           |           |
 * | Secure  |   Error   | Error    |Error    |Error      |Normal     |
 * |         |           |          |         |           |           |
 * |Transient|   N/A     | Error    |Secure   |Normal     |Normal     |
 * --------------------------------------------------------------------
 */
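
/*
 * An example walk through the tables above (illustrative only): a page
 * that the guest shares during transition, then unshares, and that HV
 * later faults on, moves
 *
 *	Normal --share--> Shared --unshare--> Secure (uvmem_pfn backed)
 *	       --HV fault/page-out--> Secure (mem_pfn backed)
 */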

#define KVMPPC_GFN_UVMEM_PFN	(1UL << 63)
#define KVMPPC_GFN_MEM_PFN	(1UL << 62)
#define KVMPPC_GFN_SHARED	(1UL << 61)
#define KVMPPC_GFN_SECURE	(KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
#define KVMPPC_GFN_FLAG_MASK	(KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
#define KVMPPC_GFN_PFN_MASK	(~KVMPPC_GFN_FLAG_MASK)
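
/*
 * A sketch of the resulting pfn[] encoding (flags live in bits 63..61;
 * the low bits carry a value only for UVMEM_PFN entries):
 *
 *	entry = uvmem_pfn | KVMPPC_GFN_UVMEM_PFN;	// secure, paged-in
 *	pfn   = entry & KVMPPC_GFN_PFN_MASK;		// recover device PFN
 */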

struct kvmppc_uvmem_slot {
	struct list_head list;
	unsigned long nr_pfns;
	unsigned long base_pfn;
	unsigned long *pfns;
};

struct kvmppc_uvmem_page_pvt {
	struct kvm *kvm;
	unsigned long gpa;
	bool skip_page_out;
	bool remove_gfn;
};

bool kvmppc_uvmem_available(void)
{
	/*
	 * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
	 * and our data structures have been initialized successfully.
	 */
	return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns)));
	if (!p->pfns) {
		kfree(p);
		return -ENOMEM;
	}
	p->nr_pfns = slot->npages;
	p->base_pfn = slot->base_gfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_add(&p->list, &kvm->arch.uvmem_pfns);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return 0;
}

/*
 * All device PFNs are already released by the time we come here.
 */
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p, *next;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
		if (p->base_pfn == slot->base_gfn) {
			vfree(p->pfns);
			list_del(&p->list);
			kfree(p);
			break;
		}
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
}

/*
 * Mark the GFN with @flag in its memslot's pfns[] array; for
 * KVMPPC_GFN_UVMEM_PFN also record the backing device PFN.
 */
static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
			    unsigned long flag, unsigned long uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (flag == KVMPPC_GFN_UVMEM_PFN)
				p->pfns[index] = uvmem_pfn | flag;
			else
				p->pfns[index] = flag;
			return;
		}
	}
}

/* mark the GFN as secure-GFN associated with the @uvmem_pfn device-PFN. */
static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
					unsigned long uvmem_pfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
}

/* mark the GFN as secure-GFN associated with a memory-PFN. */
static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
}

/* mark the GFN as a shared GFN. */
static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
}

/* mark the GFN as a non-existent GFN. */
static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, 0, 0);
}

/* return true if the GFN is a secure-GFN backed by a secure-PFN */
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
				    unsigned long *uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
				if (uvmem_pfn)
					*uvmem_pfn = p->pfns[index] &
						     KVMPPC_GFN_PFN_MASK;
				return true;
			} else
				return false;
		}
	}
	return false;
}

/*
 * Starting from *gfn, search for the next available GFN that is not yet
 * transitioned to a secure GFN. Return the value of that GFN in *gfn. If
 * a GFN is found, return true, else return false.
 *
 * Must be called with kvm->arch.uvmem_lock held.
 */
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
					    struct kvm *kvm, unsigned long *gfn)
{
	struct kvmppc_uvmem_slot *p = NULL, *iter;
	bool ret = false;
	unsigned long i;

	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
		if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
			p = iter;
			break;
		}
	if (!p)
		return ret;
	/*
	 * The code below assumes a one-to-one correspondence between
	 * kvmppc_uvmem_slot and memslot.
	 */
	for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
		unsigned long index = i - p->base_pfn;

		if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
			*gfn = i;
			ret = true;
			break;
		}
	}
	return ret;
}

/* Advise KSM to merge or unmerge the VMAs backing the memslot. */
static int kvmppc_memslot_page_merge(struct kvm *kvm,
		const struct kvm_memory_slot *memslot, bool merge)
{
	unsigned long gfn = memslot->base_gfn;
	unsigned long end, start = gfn_to_hva(kvm, gfn);
	int ret = 0;
	struct vm_area_struct *vma;
	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;

	if (kvm_is_error_hva(start))
		return H_STATE;

	end = start + (memslot->npages << PAGE_SHIFT);

	mmap_write_lock(kvm->mm);
	do {
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma) {
			ret = H_STATE;
			break;
		}
		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
				  merge_flag, &vma->vm_flags);
		if (ret) {
			ret = H_STATE;
			break;
		}
		start = vma->vm_end;
	} while (end > vma->vm_end);

	mmap_write_unlock(kvm->mm);
	return ret;
}

static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
	kvmppc_uvmem_slot_free(kvm, memslot);
	kvmppc_memslot_page_merge(kvm, memslot, true);
}

static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	int ret = H_PARAMETER;

	if (kvmppc_memslot_page_merge(kvm, memslot, false))
		return ret;

	if (kvmppc_uvmem_slot_init(kvm, memslot))
		goto out1;

	ret = uv_register_mem_slot(kvm->arch.lpid,
				   memslot->base_gfn << PAGE_SHIFT,
				   memslot->npages * PAGE_SIZE,
				   0, memslot->id);
	if (ret < 0) {
		ret = H_PARAMETER;
		goto out;
	}
	return 0;
out:
	kvmppc_uvmem_slot_free(kvm, memslot);
out1:
	kvmppc_memslot_page_merge(kvm, memslot, true);
	return ret;
}

unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot, *m;
	int ret = H_SUCCESS;
	int srcu_idx;

	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

	if (!kvmppc_uvmem_bitmap)
		return H_UNSUPPORTED;

	/* Only radix guests can be secure guests */
	if (!kvm_is_radix(kvm))
		return H_UNSUPPORTED;

	/* NAK the transition to secure if not enabled */
	if (!kvm->arch.svm_enabled)
		return H_AUTHORITY;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* register the memslot */
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
		if (ret)
			break;
	}

	if (ret) {
		/* undo the memslots created before the failing one */
		slots = kvm_memslots(kvm);
		kvm_for_each_memslot(m, slots) {
			if (m == memslot)
				break;
			__kvmppc_uvmem_memslot_delete(kvm, m);
		}
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Provision a new page on the HV side and copy over the contents
 * from secure memory using the UV_PAGE_OUT uvcall.
 * Caller must hold kvm->arch.uvmem_lock.
 */
static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *dpage, *spage;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	int ret = U_SUCCESS;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.pgmap_owner = &kvmppc_uvmem_pgmap;
	mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	/* The requested page is already paged-out, nothing to do */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
		return ret;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return -1;

	spage = migrate_pfn_to_page(*mig.src);
	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	if (!is_zone_device_page(spage))
		goto out_finalize;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	lock_page(dpage);
	pvt = spage->zone_device_data;
	pfn = page_to_pfn(dpage);

	/*
	 * This function is used in two cases:
	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
	 * - When a secure page is converted to shared page, we *get*
	 *   the page to essentially unmap the device page. In this
	 *   case we skip page-out.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS)
		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
	else {
		unlock_page(dpage);
		__free_page(dpage);
		goto out_finalize;
	}

	migrate_vma_pages(&mig);

out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	int ret;

	mutex_lock(&kvm->arch.uvmem_lock);
	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return ret;
}

/*
 * Drop device pages that we maintain for the secure guest
 *
 * We first mark the pages to be skipped from UV_PAGE_OUT when there
 * is an HV side fault on these pages. Next we *get* these pages, forcing
 * a fault on them, and do fault-time migration to replace the device PTEs
 * in the QEMU page table with normal PTEs from newly allocated pages.
 */
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
			     struct kvm *kvm, bool skip_page_out)
{
	int i;
	struct kvmppc_uvmem_page_pvt *pvt;
	struct page *uvmem_page;
	struct vm_area_struct *vma = NULL;
	unsigned long uvmem_pfn, gfn;
	unsigned long addr;

	mmap_read_lock(kvm->mm);

	addr = slot->userspace_addr;

	gfn = slot->base_gfn;
	for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {

		/* Fetch the VMA if addr is not in the latest fetched one */
		if (!vma || addr >= vma->vm_end) {
			vma = find_vma_intersection(kvm->mm, addr, addr+1);
			if (!vma) {
				pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
				break;
			}
		}

		mutex_lock(&kvm->arch.uvmem_lock);

		if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
			uvmem_page = pfn_to_page(uvmem_pfn);
			pvt = uvmem_page->zone_device_data;
			pvt->skip_page_out = skip_page_out;
			pvt->remove_gfn = true;

			if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
						  PAGE_SHIFT, kvm, pvt->gpa))
				pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
				       pvt->gpa, addr);
		} else {
			/* Remove the shared flag if any */
			kvmppc_gfn_remove(gfn, kvm);
		}

		mutex_unlock(&kvm->arch.uvmem_lock);
	}

	mmap_read_unlock(kvm->mm);
}

unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
	int srcu_idx;
	struct kvm_memory_slot *memslot;

	/*
	 * Expect to be called only after INIT_START and before INIT_DONE.
	 * If INIT_DONE was completed, use normal VM termination sequence.
	 */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return H_STATE;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
		kvmppc_uvmem_drop_pages(memslot, kvm, false);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	kvm->arch.secure_guest = 0;
	uv_svm_terminate(kvm->arch.lpid);

	return H_PARAMETER;
}

/*
 * Get a free device PFN from the pool
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN). Device
 * PFN will be used to keep track of the secure page on HV side.
 *
 * Called with kvm->arch.uvmem_lock held
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
	struct page *dpage = NULL;
	unsigned long bit, uvmem_pfn;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn_last, pfn_first;

	pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
	pfn_last = pfn_first +
		   (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
				  pfn_last - pfn_first);
	if (bit >= (pfn_last - pfn_first))
		goto out;
	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
	if (!pvt)
		goto out_clear;

	uvmem_pfn = bit + pfn_first;
	kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

	pvt->gpa = gpa;
	pvt->kvm = kvm;

	dpage = pfn_to_page(uvmem_pfn);
	dpage->zone_device_data = pvt;
	get_page(dpage);
	lock_page(dpage);
	return dpage;
out_clear:
	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
	spin_unlock(&kvmppc_uvmem_bitmap_lock);
	return NULL;
}

/*
 * Alloc a PFN from private device memory pool. If @pagein is true,
 * copy page from normal memory to secure memory using UV_PAGE_IN uvcall.
 */
static int kvmppc_svm_page_in(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long gpa, struct kvm *kvm,
		unsigned long page_shift,
		bool pagein)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *spage;
	unsigned long pfn;
	struct page *dpage;
	int ret = 0;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.flags = MIGRATE_VMA_SELECT_SYSTEM;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return ret;

	if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
		ret = -1;
		goto out_finalize;
	}

	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	if (pagein) {
		pfn = *mig.src >> MIGRATE_PFN_SHIFT;
		spage = migrate_pfn_to_page(*mig.src);
		if (spage) {
			ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
					 gpa, 0, page_shift);
			if (ret)
				goto out_finalize;
		}
	}

	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

/* Migrate all remaining normal-GFNs of the memslot to device (secure) PFNs. */
static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	unsigned long gfn = memslot->base_gfn;
	struct vm_area_struct *vma;
	unsigned long start, end;
	int ret = 0;

	mmap_read_lock(kvm->mm);
	mutex_lock(&kvm->arch.uvmem_lock);
	while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
		ret = H_STATE;
		start = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(start))
			break;

		end = start + (1UL << PAGE_SHIFT);
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma || vma->vm_start > start || vma->vm_end < end)
			break;

		ret = kvmppc_svm_page_in(vma, start, end,
				(gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
		if (ret) {
			ret = H_STATE;
			break;
		}

		/* relinquish the cpu if needed */
		cond_resched();
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
	mmap_read_unlock(kvm->mm);
	return ret;
}

unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;
	long ret = H_SUCCESS;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	/* migrate any unmoved normal pfn to device pfns */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
		if (ret) {
			/*
			 * The pages will remain transitioned.
			 * It's the caller's responsibility to
			 * terminate the VM, which will undo
			 * all state of the VM. Till then
			 * this VM is in an erroneous state.
			 * Its KVMPPC_SECURE_INIT_DONE will
			 * remain unset.
			 */
			ret = H_STATE;
			goto out;
		}
	}

	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
	pr_info("LPID %d went secure\n", kvm->arch.lpid);

out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Shares the page with HV, thus making it a normal page.
 *
 * - If the page is already secure, then provision a new page and share
 * - If the page is a normal page, share the existing page
 *
 * In the former case, uses the dev_pagemap_ops.migrate_to_ram handler
 * to unmap the device page from QEMU's page tables.
 */
static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
		unsigned long page_shift)
{
	int ret = H_PARAMETER;
	struct page *uvmem_page;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	unsigned long gfn = gpa >> page_shift;
	int srcu_idx;
	unsigned long uvmem_pfn;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		/*
		 * do not drop the GFN. It is a valid GFN
		 * that is transitioned to a shared GFN.
		 */
		pvt->remove_gfn = false;
	}

retry:
	mutex_unlock(&kvm->arch.uvmem_lock);
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		pvt->remove_gfn = false; /* it continues to be a valid GFN */
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
				page_shift)) {
		kvmppc_gfn_shared(gfn, kvm);
		ret = H_SUCCESS;
	}
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
 *
 * H_PAGE_IN_SHARED flag makes the page shared, which means that the same
 * memory is visible from both UV and HV.
 */
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
		unsigned long flags,
		unsigned long page_shift)
{
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	unsigned long gfn = gpa >> page_shift;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags & ~H_PAGE_IN_SHARED)
		return H_P2;

	if (flags & H_PAGE_IN_SHARED)
		return kvmppc_share_page(kvm, gpa, page_shift);

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);

	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* Fail the page-in request of an already paged-in page */
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out_unlock;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out_unlock;

	if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
				true))
		goto out_unlock;

	ret = H_SUCCESS;

out_unlock:
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that
 * has been moved to secure memory. We ask UV to give back the page by
 * issuing a UV_PAGE_OUT uvcall.
 *
 * This eventually results in dropping of the device PFN, and the newly
 * provisioned page/PFN gets populated in QEMU page tables.
 */
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
				vmf->address + PAGE_SIZE, PAGE_SHIFT,
				pvt->kvm, pvt->gpa))
		return VM_FAULT_SIGBUS;
	else
		return 0;
}

/*
 * Release the device PFN back to the pool
 *
 * Gets called when a secure GFN transitions from a secure-PFN
 * to a normal PFN during H_SVM_PAGE_OUT.
 * Gets called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_page_free(struct page *page)
{
	unsigned long pfn = page_to_pfn(page) -
			(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
	struct kvmppc_uvmem_page_pvt *pvt;

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = page->zone_device_data;
	page->zone_device_data = NULL;
	if (pvt->remove_gfn)
		kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	else
		kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
	.page_free = kvmppc_uvmem_page_free,
	.migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
};

/*
 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
 */
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
		      unsigned long flags, unsigned long page_shift)
{
	unsigned long gfn = gpa >> page_shift;
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags)
		return H_P2;

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);
	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out;

	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
		ret = H_SUCCESS;
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
	unsigned long pfn;
	int ret = U_SUCCESS;

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out;

	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
			 0, PAGE_SHIFT);
out:
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
{
	int ret = __kvmppc_uvmem_memslot_create(kvm, new);

	if (!ret)
		ret = kvmppc_uv_migrate_mem_slot(kvm, new);

	return ret;
}

void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
{
	__kvmppc_uvmem_memslot_delete(kvm, old);
}

static u64 kvmppc_get_secmem_size(void)
{
	struct device_node *np;
	int i, len;
	const __be32 *prop;
	u64 size = 0;

	/*
	 * First try the new ibm,secure-memory nodes which supersede the
	 * secure-memory-ranges property.
	 * If we found some, no need to read the deprecated ones.
	 */
	for_each_compatible_node(np, NULL, "ibm,secure-memory") {
		prop = of_get_property(np, "reg", &len);
		if (!prop)
			continue;
		size += of_read_number(prop + 2, 2);
	}
	if (size)
		return size;

	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
	if (!np)
		goto out;

	prop = of_get_property(np, "secure-memory-ranges", &len);
	if (!prop)
		goto out_put;

	for (i = 0; i < len / (sizeof(*prop) * 4); i++)
		size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
	of_node_put(np);
out:
	return size;
}

int kvmppc_uvmem_init(void)
{
	int ret = 0;
	unsigned long size;
	struct resource *res;
	void *addr;
	unsigned long pfn_last, pfn_first;

	size = kvmppc_get_secmem_size();
	if (!size) {
		/*
		 * Don't fail the initialization of kvm-hv module if
		 * the platform doesn't export ibm,uv-firmware node.
		 * Let normal guests run on such PEF-disabled platform.
		 */
		pr_info("KVMPPC-UVMEM: No support for secure guests\n");
		goto out;
	}

	res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out;
	}

	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
	kvmppc_uvmem_pgmap.range.start = res->start;
	kvmppc_uvmem_pgmap.range.end = res->end;
	kvmppc_uvmem_pgmap.nr_range = 1;
	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
	/* just one global instance: */
	kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
	addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto out_free_region;
	}

	pfn_first = res->start >> PAGE_SHIFT;
	pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
	kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
				      sizeof(unsigned long), GFP_KERNEL);
	if (!kvmppc_uvmem_bitmap) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
	return ret;
out_unmap:
	memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
	release_mem_region(res->start, size);
out:
	return ret;
}

void kvmppc_uvmem_free(void)
{
	if (!kvmppc_uvmem_bitmap)
		return;

	memunmap_pages(&kvmppc_uvmem_pgmap);
	release_mem_region(kvmppc_uvmem_pgmap.range.start,
			   range_len(&kvmppc_uvmem_pgmap.range));
	kfree(kvmppc_uvmem_bitmap);
}