/*
 * MMU operations common to all auto-translated physmap guests.
 *
 * Copyright (C) 2015 Citrix Systems R&D Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30*4882a593Smuzhiyun #include <linux/kernel.h>
31*4882a593Smuzhiyun #include <linux/mm.h>
32*4882a593Smuzhiyun #include <linux/slab.h>
33*4882a593Smuzhiyun #include <linux/vmalloc.h>
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #include <asm/xen/hypercall.h>
36*4882a593Smuzhiyun #include <asm/xen/hypervisor.h>
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun #include <xen/xen.h>
39*4882a593Smuzhiyun #include <xen/xen-ops.h>
40*4882a593Smuzhiyun #include <xen/page.h>
41*4882a593Smuzhiyun #include <xen/interface/xen.h>
42*4882a593Smuzhiyun #include <xen/interface/memory.h>
43*4882a593Smuzhiyun #include <xen/balloon.h>
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun /* Break down the pages in 4KB chunk and call fn for each gfn */
xen_for_each_gfn(struct page ** pages,unsigned nr_gfn,xen_gfn_fn_t fn,void * data)48*4882a593Smuzhiyun static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
49*4882a593Smuzhiyun xen_gfn_fn_t fn, void *data)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun unsigned long xen_pfn = 0;
52*4882a593Smuzhiyun struct page *page;
53*4882a593Smuzhiyun int i;
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun for (i = 0; i < nr_gfn; i++) {
56*4882a593Smuzhiyun if ((i % XEN_PFN_PER_PAGE) == 0) {
57*4882a593Smuzhiyun page = pages[i / XEN_PFN_PER_PAGE];
58*4882a593Smuzhiyun xen_pfn = page_to_xen_pfn(page);
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun fn(pfn_to_gfn(xen_pfn++), data);
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun }
63*4882a593Smuzhiyun
/*
 * State carried across remap_pte_fn() invocations for a single
 * xen_xlate_remap_gfn_array() request.
 */
struct remap_data {
	xen_pfn_t *fgfn; /* foreign domain's gfn */
	int nr_fgfn; /* Number of foreign gfn left to map */
	pgprot_t prot; /* page protection for the new ptes */
	domid_t domid; /* foreign domain owning the frames */
	struct vm_area_struct *vma; /* destination VMA */
	int index; /* next entry of @pages to consume */
	struct page **pages; /* local ballooned pages backing the mapping */
	struct xen_remap_gfn_info *info;
	int *err_ptr; /* one error slot per Xen PFN, advanced as we go */
	int mapped; /* running count of successfully mapped gfns */

	/* Hypercall parameters */
	int h_errs[XEN_PFN_PER_PAGE]; /* per-gfn status from the hypervisor */
	xen_ulong_t h_idxs[XEN_PFN_PER_PAGE]; /* foreign gfns to map */
	xen_pfn_t h_gpfns[XEN_PFN_PER_PAGE]; /* local gfns to map them at */

	int h_iter; /* Iterator */
};
83*4882a593Smuzhiyun
setup_hparams(unsigned long gfn,void * data)84*4882a593Smuzhiyun static void setup_hparams(unsigned long gfn, void *data)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun struct remap_data *info = data;
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun info->h_idxs[info->h_iter] = *info->fgfn;
89*4882a593Smuzhiyun info->h_gpfns[info->h_iter] = gfn;
90*4882a593Smuzhiyun info->h_errs[info->h_iter] = 0;
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun info->h_iter++;
93*4882a593Smuzhiyun info->fgfn++;
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun
/*
 * apply_to_page_range() callback: map up to XEN_PFN_PER_PAGE foreign gfns
 * onto the local ballooned page backing this pte with a single
 * XENMEM_add_to_physmap_range hypercall.  Per-gfn error status is written
 * through info->err_ptr and successes are tallied in info->mapped.
 * Always returns 0 so the page-range walk continues; failures are
 * reported via the err_ptr array instead.
 */
static int remap_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	/* pte for the local page that will back the foreign frames */
	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot));
	int rc, nr_gfn;
	uint32_t i;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = info->domid,
		.space = XENMAPSPACE_gmfn_foreign,
	};

	/* Handle at most one kernel page's worth of granules per call. */
	nr_gfn = min_t(typeof(info->nr_fgfn), XEN_PFN_PER_PAGE, info->nr_fgfn);
	info->nr_fgfn -= nr_gfn;

	/* Fill h_idxs/h_gpfns/h_errs via setup_hparams, one slot per gfn. */
	info->h_iter = 0;
	xen_for_each_gfn(&page, nr_gfn, setup_hparams, info);
	BUG_ON(info->h_iter != nr_gfn);

	set_xen_guest_handle(xatp.idxs, info->h_idxs);
	set_xen_guest_handle(xatp.gpfns, info->h_gpfns);
	set_xen_guest_handle(xatp.errs, info->h_errs);
	xatp.size = nr_gfn;

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);

	/* info->err_ptr expect to have one error status per Xen PFN */
	for (i = 0; i < nr_gfn; i++) {
		/* A failed hypercall overrides any per-gfn status. */
		int err = (rc < 0) ? rc : info->h_errs[i];

		*(info->err_ptr++) = err;
		if (!err)
			info->mapped++;
	}

	/*
	 * Note: the hypercall will return 0 in most cases even if not all
	 * of the fgmfn were mapped.  We still have to update the pte as
	 * the userspace may decide to continue.
	 */
	if (!rc)
		set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}
142*4882a593Smuzhiyun
/*
 * Map an array of foreign guest frames into @vma for an auto-translated
 * guest.  Per-frame status is stored through @err_ptr; the return value
 * is the number of frames successfully mapped, or a negative errno if
 * the page-range walk itself failed.
 */
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
			      unsigned long addr,
			      xen_pfn_t *gfn, int nr,
			      int *err_ptr, pgprot_t prot,
			      unsigned domid,
			      struct page **pages)
{
	int err;
	unsigned long range = DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE) << PAGE_SHIFT;
	struct remap_data data = {
		.fgfn = gfn,
		.nr_fgfn = nr,
		.prot = prot,
		.domid = domid,
		.vma = vma,
		.pages = pages,
		.index = 0,
		.err_ptr = err_ptr,
		.mapped = 0,
	};

	/* Kept here for the purpose of making sure code doesn't break
	   x86 PVOPS */
	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	err = apply_to_page_range(vma->vm_mm, addr, range,
				  remap_pte_fn, &data);
	return err < 0 ? err : data.mapped;
}
EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array);
173*4882a593Smuzhiyun
unmap_gfn(unsigned long gfn,void * data)174*4882a593Smuzhiyun static void unmap_gfn(unsigned long gfn, void *data)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun struct xen_remove_from_physmap xrp;
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun xrp.domid = DOMID_SELF;
179*4882a593Smuzhiyun xrp.gpfn = gfn;
180*4882a593Smuzhiyun (void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
181*4882a593Smuzhiyun }
182*4882a593Smuzhiyun
/*
 * Tear down a mapping created by xen_xlate_remap_gfn_array(): remove
 * each of the @nr gfns backing @pages from the physmap.  @vma is unused
 * here but kept for interface symmetry with the remap path.  Always
 * returns 0.
 */
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages)
{
	xen_for_each_gfn(pages, nr, unmap_gfn, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
191*4882a593Smuzhiyun
/* Cursor used by setup_balloon_gfn() to fill an array of gfns. */
struct map_balloon_pages {
	xen_pfn_t *pfns; /* destination array of gfns */
	unsigned int idx; /* next slot to write */
};
196*4882a593Smuzhiyun
setup_balloon_gfn(unsigned long gfn,void * data)197*4882a593Smuzhiyun static void setup_balloon_gfn(unsigned long gfn, void *data)
198*4882a593Smuzhiyun {
199*4882a593Smuzhiyun struct map_balloon_pages *info = data;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun info->pfns[info->idx++] = gfn;
202*4882a593Smuzhiyun }
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun /**
205*4882a593Smuzhiyun * xen_xlate_map_ballooned_pages - map a new set of ballooned pages
206*4882a593Smuzhiyun * @gfns: returns the array of corresponding GFNs
207*4882a593Smuzhiyun * @virt: returns the virtual address of the mapped region
208*4882a593Smuzhiyun * @nr_grant_frames: number of GFNs
209*4882a593Smuzhiyun * @return 0 on success, error otherwise
210*4882a593Smuzhiyun *
211*4882a593Smuzhiyun * This allocates a set of ballooned pages and maps them into the
212*4882a593Smuzhiyun * kernel's address space.
213*4882a593Smuzhiyun */
xen_xlate_map_ballooned_pages(xen_pfn_t ** gfns,void ** virt,unsigned long nr_grant_frames)214*4882a593Smuzhiyun int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
215*4882a593Smuzhiyun unsigned long nr_grant_frames)
216*4882a593Smuzhiyun {
217*4882a593Smuzhiyun struct page **pages;
218*4882a593Smuzhiyun xen_pfn_t *pfns;
219*4882a593Smuzhiyun void *vaddr;
220*4882a593Smuzhiyun struct map_balloon_pages data;
221*4882a593Smuzhiyun int rc;
222*4882a593Smuzhiyun unsigned long nr_pages;
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun BUG_ON(nr_grant_frames == 0);
225*4882a593Smuzhiyun nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE);
226*4882a593Smuzhiyun pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);
227*4882a593Smuzhiyun if (!pages)
228*4882a593Smuzhiyun return -ENOMEM;
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
231*4882a593Smuzhiyun if (!pfns) {
232*4882a593Smuzhiyun kfree(pages);
233*4882a593Smuzhiyun return -ENOMEM;
234*4882a593Smuzhiyun }
235*4882a593Smuzhiyun rc = xen_alloc_unpopulated_pages(nr_pages, pages);
236*4882a593Smuzhiyun if (rc) {
237*4882a593Smuzhiyun pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
238*4882a593Smuzhiyun nr_pages, rc);
239*4882a593Smuzhiyun kfree(pages);
240*4882a593Smuzhiyun kfree(pfns);
241*4882a593Smuzhiyun return rc;
242*4882a593Smuzhiyun }
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun data.pfns = pfns;
245*4882a593Smuzhiyun data.idx = 0;
246*4882a593Smuzhiyun xen_for_each_gfn(pages, nr_grant_frames, setup_balloon_gfn, &data);
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL);
249*4882a593Smuzhiyun if (!vaddr) {
250*4882a593Smuzhiyun pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
251*4882a593Smuzhiyun nr_pages, rc);
252*4882a593Smuzhiyun xen_free_unpopulated_pages(nr_pages, pages);
253*4882a593Smuzhiyun kfree(pages);
254*4882a593Smuzhiyun kfree(pfns);
255*4882a593Smuzhiyun return -ENOMEM;
256*4882a593Smuzhiyun }
257*4882a593Smuzhiyun kfree(pages);
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun *gfns = pfns;
260*4882a593Smuzhiyun *virt = vaddr;
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun return 0;
263*4882a593Smuzhiyun }
264*4882a593Smuzhiyun
/* Cursor state for remap_pfn_fn() while walking a page range. */
struct remap_pfn {
	struct mm_struct *mm; /* address space receiving the ptes */
	struct page **pages; /* pages to install, consumed in order */
	pgprot_t prot; /* protection bits for each new pte */
	unsigned long i; /* index of the next page to install */
};
271*4882a593Smuzhiyun
/*
 * apply_to_page_range() callback: install the next page from the
 * remap_pfn cursor into this pte as a special (PFN-map) mapping.
 */
static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_pfn *ctx = data;
	unsigned long pfn = page_to_pfn(ctx->pages[ctx->i++]);

	set_pte_at(ctx->mm, addr, ptep,
		   pte_mkspecial(pfn_pte(pfn, ctx->prot)));

	return 0;
}
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun /* Used by the privcmd module, but has to be built-in on ARM */
xen_remap_vma_range(struct vm_area_struct * vma,unsigned long addr,unsigned long len)285*4882a593Smuzhiyun int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
286*4882a593Smuzhiyun {
287*4882a593Smuzhiyun struct remap_pfn r = {
288*4882a593Smuzhiyun .mm = vma->vm_mm,
289*4882a593Smuzhiyun .pages = vma->vm_private_data,
290*4882a593Smuzhiyun .prot = vma->vm_page_prot,
291*4882a593Smuzhiyun };
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
294*4882a593Smuzhiyun }
295*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(xen_remap_vma_range);
296