/* xref: /OK3568_Linux_fs/kernel/arch/x86/xen/mmu_pv.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0

/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
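
/*
 * Quick reference (a sketch added here, not part of the original comment):
 * the pfn<->mfn translation described above is what the p2m/m2p helpers
 * used throughout this file provide, e.g.:
 *
 *	mfn = pfn_to_mfn(pfn);	 // guest "physical" frame -> machine frame
 *	pfn = mfn_to_pfn(mfn);	 // machine frame -> guest "physical" frame
 *
 * A translation can be missing; pte_mfn_to_pfn()/pte_pfn_to_mfn() below
 * show how such entries are folded into non-present ptes.
 */
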
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/pgtable.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif

#include <trace/events/xen.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820/api.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/memtype.h>
#include <asm/smp.h>
#include <asm/tlb.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
static DEFINE_SPINLOCK(xen_reservation_lock);

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);	 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */

static phys_addr_t xen_pt_base, xen_pt_size __initdata;

static DEFINE_STATIC_KEY_FALSE(xen_struct_pages_ready);

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


/*
 * During early boot all page table pages are pinned, but we do not have struct
 * pages, so return true until struct pages are ready.
 */
static bool xen_page_pinned(void *ptr)
{
	if (static_branch_likely(&xen_struct_pages_ready)) {
		struct page *page = virt_to_page(ptr);

		return PagePinned(page);
	}
	return true;
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

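/*
 * Usage pattern (inferred from the callers below, not an original comment):
 * updates built via xen_extend_mmu_update()/xen_extend_mmuext_op() are only
 * queued on the current multicall batch; they reach the hypervisor when the
 * batch is issued, e.g.:
 *
 *	xen_mc_batch();
 *	xen_extend_mmu_update(&u);	// may coalesce with the previous call
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 * xen_mc_extend_args() returns a usable mc only when the previous multicall
 * is of the same type, in which case just its argument count is bumped.
 */
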
static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write, but a hypercall is much cheaper.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

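/*
 * In short (a restatement of the two helpers above, not an original comment):
 * in PARAVIRT_LAZY_MMU mode the pte write is queued on the multicall batch by
 * xen_batched_set_pte(); otherwise __xen_set_pte() falls back to a single
 * synchronous HYPERVISOR_mmu_update() hypercall.
 */
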
static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

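/*
 * Illustration (added, not an original comment): for a present entry the
 * machine frame is extracted, translated and recombined with the untouched
 * flag bits:
 *
 *	(mfn << PAGE_SHIFT) | flags  ->  (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags
 *
 * If the m2p lookup fails (mfn_to_pfn() returns ~0), _PAGE_PRESENT is
 * dropped so the value reads back as a non-present entry.
 */
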
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		mfn = __pfn_to_mfn(pfn);

		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else
			mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

__visible pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

__visible pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

__visible pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

__visible pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

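/*
 * Note (inferred from the code above and its callers, not an original
 * comment): the optional user-mode pgd is tracked through the kernel pgd
 * page's struct page (page->private), and only pgd slots below USER_LIMIT
 * have a user-mode counterpart, so xen_get_user_pgd() returns NULL for
 * kernel-only entries or when no user pagetable is attached.
 */
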
static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = p4d_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_p4d, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_p4d_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_p4d(p4d_t *ptr, p4d_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
	pgd_t pgd_val;

	trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			pgd_val.pgd = p4d_val_ma(val);
			*user_ptr = pgd_val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_p4d_hyper(ptr, val);
	if (user_ptr)
		__xen_set_p4d_hyper((p4d_t *)user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

#if CONFIG_PGTABLE_LEVELS >= 5
__visible p4dval_t xen_p4d_val(p4d_t p4d)
{
	return pte_mfn_to_pfn(p4d.p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val);

__visible p4d_t xen_make_p4d(p4dval_t p4d)
{
	p4d = pte_pfn_to_mfn(p4d);

	return native_make_p4d(p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 bool last, unsigned long limit)
{
	int i, nr;

	nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
	for (i = 0; i < nr; i++) {
		if (!pmd_none(pmd[i]))
			(*func)(mm, pmd_page(pmd[i]), PT_PTE);
	}
}

static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 bool last, unsigned long limit)
{
	int i, nr;

	nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
	for (i = 0; i < nr; i++) {
		pmd_t *pmd;

		if (pud_none(pud[i]))
			continue;

		pmd = pmd_offset(&pud[i], 0);
		if (PTRS_PER_PMD > 1)
			(*func)(mm, virt_to_page(pmd), PT_PMD);
		xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
	}
}

static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 bool last, unsigned long limit)
{
	pud_t *pud;


	if (p4d_none(*p4d))
		return;

	pud = pud_offset(p4d, 0);
	if (PTRS_PER_PUD > 1)
		(*func)(mm, virt_to_page(pud), PT_PUD);
	xen_pud_walk(mm, pud, func, last, limit);
}

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * We must skip the Xen hole in the middle of the address space, just after
 * the big x86-64 virtual hole.
 */
static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			   void (*func)(struct mm_struct *mm, struct page *,
					enum pt_level),
			   unsigned long limit)
{
	int i, nr;
	unsigned hole_low = 0, hole_high = 0;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.
	 */
	hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
	hole_high = pgd_index(GUARD_HOLE_END_ADDR);

	nr = pgd_index(limit) + 1;
	for (i = 0; i < nr; i++) {
		p4d_t *p4d;

		if (i >= hole_low && i < hole_high)
			continue;

		if (pgd_none(pgd[i]))
			continue;

		p4d = p4d_offset(&pgd[i], 0);
		xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
	}

	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	(*func)(mm, virt_to_page(pgd), PT_PGD);
}

static void xen_pgd_walk(struct mm_struct *mm,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 unsigned long limit)
{
	__xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTE_PTLOCKS
	ptl = ptlock_ptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static void xen_pin_page(struct mm_struct *mm, struct page *page,
			 enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);

	if (!pgfl) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);

	xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

	if (user_pgd) {
		xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
		xen_do_pin(MMUEXT_PIN_L4_TABLE,
			   PFN_DOWN(__pa(user_pgd)));
	}

	xen_mc_issue(0);
}

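/*
 * Pinning in short (a restatement of __xen_pgd_pin() above, not an original
 * comment): one multicall batch remaps every pagetable page read-only via
 * xen_pin_page(), then issues MMUEXT_PIN_L4_TABLE for the kernel pgd and,
 * if present, for the separate user-mode pgd.
 */
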
static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				   enum pt_level level)
{
	SetPagePinned(page);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now once struct pages for allocated pages are
 * initialized. This happens only after memblock_free_all() is called.
 */
static void __init xen_after_bootmem(void)
{
	static_branch_enable(&xen_struct_pages_ready);
	SetPagePinned(virt_to_page(level3_user_vsyscall));
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static void xen_unpin_page(struct mm_struct *mm, struct page *page,
			   enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	if (user_pgd) {
		xen_do_pin(MMUEXT_UNPIN_TABLE,
			   PFN_DOWN(__pa(user_pgd)));
		xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
	}

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}

static void drop_mm_ref_this_cpu(void *info)
{
	struct mm_struct *mm = info;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
		leave_mm(smp_processor_id());

	/*
	 * If this cpu still has a stale cr3 reference, then make sure
	 * it has been flushed.
	 */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		xen_mc_flush();
}

#ifdef CONFIG_SMP
/*
 * Another cpu may still have their %cr3 pointing at the pagetable, so
 * we need to repoint it somewhere else before we can unpin it.
 */
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	drop_mm_ref_this_cpu(mm);

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
		}
		return;
	}

	/*
	 * It's possible that a vcpu may have a stale reference to our
	 * cr3, because it's in lazy mode and hasn't flushed its set of
	 * pending hypercalls yet.  In this case, we can look at its
	 * actual current cr3 value, and force it to flush if needed.
	 */
	cpumask_clear(mask);
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	drop_mm_ref_this_cpu(mm);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces them to be read-only, and it controls all updates
 * to them.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static void xen_post_allocator_init(void);

static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

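/*
 * Note (an observation about the two pinning helpers, not an original
 * comment): pin_pagetable_pfn() issues a single synchronous
 * HYPERVISOR_mmuext_op() and BUG()s on failure, which suits early boot,
 * whereas xen_do_pin() above queues the same mmuext_op on the current
 * multicall batch.
 */
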
xen_cleanhighmap(unsigned long vaddr,unsigned long vaddr_end)997*4882a593Smuzhiyun static void __init xen_cleanhighmap(unsigned long vaddr,
998*4882a593Smuzhiyun 				    unsigned long vaddr_end)
999*4882a593Smuzhiyun {
1000*4882a593Smuzhiyun 	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1001*4882a593Smuzhiyun 	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
1004*4882a593Smuzhiyun 	 * We include the PMD passed in on _both_ boundaries. */
1005*4882a593Smuzhiyun 	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1006*4882a593Smuzhiyun 			pmd++, vaddr += PMD_SIZE) {
1007*4882a593Smuzhiyun 		if (pmd_none(*pmd))
1008*4882a593Smuzhiyun 			continue;
1009*4882a593Smuzhiyun 		if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1010*4882a593Smuzhiyun 			set_pmd(pmd, __pmd(0));
1011*4882a593Smuzhiyun 	}
1012*4882a593Smuzhiyun 	/* In case we did something silly, we should crash in this function
1013*4882a593Smuzhiyun 	 * instead of somewhere later and be confusing. */
1014*4882a593Smuzhiyun 	xen_mc_flush();
1015*4882a593Smuzhiyun }
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun /*
1018*4882a593Smuzhiyun  * Make a page range writeable and free it.
1019*4882a593Smuzhiyun  */
xen_free_ro_pages(unsigned long paddr,unsigned long size)1020*4882a593Smuzhiyun static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1021*4882a593Smuzhiyun {
1022*4882a593Smuzhiyun 	void *vaddr = __va(paddr);
1023*4882a593Smuzhiyun 	void *vaddr_end = vaddr + size;
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun 	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1026*4882a593Smuzhiyun 		make_lowmem_page_readwrite(vaddr);
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 	memblock_free(paddr, size);
1029*4882a593Smuzhiyun }
1030*4882a593Smuzhiyun 
xen_cleanmfnmap_free_pgtbl(void * pgtbl,bool unpin)1031*4882a593Smuzhiyun static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
1032*4882a593Smuzhiyun {
1033*4882a593Smuzhiyun 	unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun 	if (unpin)
1036*4882a593Smuzhiyun 		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
1037*4882a593Smuzhiyun 	ClearPagePinned(virt_to_page(__va(pa)));
1038*4882a593Smuzhiyun 	xen_free_ro_pages(pa, PAGE_SIZE);
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun 
xen_cleanmfnmap_pmd(pmd_t * pmd,bool unpin)1041*4882a593Smuzhiyun static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
1042*4882a593Smuzhiyun {
1043*4882a593Smuzhiyun 	unsigned long pa;
1044*4882a593Smuzhiyun 	pte_t *pte_tbl;
1045*4882a593Smuzhiyun 	int i;
1046*4882a593Smuzhiyun 
1047*4882a593Smuzhiyun 	if (pmd_large(*pmd)) {
1048*4882a593Smuzhiyun 		pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1049*4882a593Smuzhiyun 		xen_free_ro_pages(pa, PMD_SIZE);
1050*4882a593Smuzhiyun 		return;
1051*4882a593Smuzhiyun 	}
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 	pte_tbl = pte_offset_kernel(pmd, 0);
1054*4882a593Smuzhiyun 	for (i = 0; i < PTRS_PER_PTE; i++) {
1055*4882a593Smuzhiyun 		if (pte_none(pte_tbl[i]))
1056*4882a593Smuzhiyun 			continue;
1057*4882a593Smuzhiyun 		pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
1058*4882a593Smuzhiyun 		xen_free_ro_pages(pa, PAGE_SIZE);
1059*4882a593Smuzhiyun 	}
1060*4882a593Smuzhiyun 	set_pmd(pmd, __pmd(0));
1061*4882a593Smuzhiyun 	xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
1062*4882a593Smuzhiyun }
1063*4882a593Smuzhiyun 
xen_cleanmfnmap_pud(pud_t * pud,bool unpin)1064*4882a593Smuzhiyun static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
1065*4882a593Smuzhiyun {
1066*4882a593Smuzhiyun 	unsigned long pa;
1067*4882a593Smuzhiyun 	pmd_t *pmd_tbl;
1068*4882a593Smuzhiyun 	int i;
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun 	if (pud_large(*pud)) {
1071*4882a593Smuzhiyun 		pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1072*4882a593Smuzhiyun 		xen_free_ro_pages(pa, PUD_SIZE);
1073*4882a593Smuzhiyun 		return;
1074*4882a593Smuzhiyun 	}
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun 	pmd_tbl = pmd_offset(pud, 0);
1077*4882a593Smuzhiyun 	for (i = 0; i < PTRS_PER_PMD; i++) {
1078*4882a593Smuzhiyun 		if (pmd_none(pmd_tbl[i]))
1079*4882a593Smuzhiyun 			continue;
1080*4882a593Smuzhiyun 		xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
1081*4882a593Smuzhiyun 	}
1082*4882a593Smuzhiyun 	set_pud(pud, __pud(0));
1083*4882a593Smuzhiyun 	xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
1084*4882a593Smuzhiyun }
1085*4882a593Smuzhiyun 
xen_cleanmfnmap_p4d(p4d_t * p4d,bool unpin)1086*4882a593Smuzhiyun static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
1087*4882a593Smuzhiyun {
1088*4882a593Smuzhiyun 	unsigned long pa;
1089*4882a593Smuzhiyun 	pud_t *pud_tbl;
1090*4882a593Smuzhiyun 	int i;
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	if (p4d_large(*p4d)) {
1093*4882a593Smuzhiyun 		pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
1094*4882a593Smuzhiyun 		xen_free_ro_pages(pa, P4D_SIZE);
1095*4882a593Smuzhiyun 		return;
1096*4882a593Smuzhiyun 	}
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	pud_tbl = pud_offset(p4d, 0);
1099*4882a593Smuzhiyun 	for (i = 0; i < PTRS_PER_PUD; i++) {
1100*4882a593Smuzhiyun 		if (pud_none(pud_tbl[i]))
1101*4882a593Smuzhiyun 			continue;
1102*4882a593Smuzhiyun 		xen_cleanmfnmap_pud(pud_tbl + i, unpin);
1103*4882a593Smuzhiyun 	}
1104*4882a593Smuzhiyun 	set_p4d(p4d, __p4d(0));
1105*4882a593Smuzhiyun 	xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun /*
1109*4882a593Smuzhiyun  * Since it is well isolated we can (and since it is perhaps large we should)
1110*4882a593Smuzhiyun  * also free the page tables mapping the initial P->M table.
1111*4882a593Smuzhiyun  */
xen_cleanmfnmap(unsigned long vaddr)1112*4882a593Smuzhiyun static void __init xen_cleanmfnmap(unsigned long vaddr)
1113*4882a593Smuzhiyun {
1114*4882a593Smuzhiyun 	pgd_t *pgd;
1115*4882a593Smuzhiyun 	p4d_t *p4d;
1116*4882a593Smuzhiyun 	bool unpin;
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 	unpin = (vaddr == 2 * PGDIR_SIZE);
1119*4882a593Smuzhiyun 	vaddr &= PMD_MASK;
1120*4882a593Smuzhiyun 	pgd = pgd_offset_k(vaddr);
1121*4882a593Smuzhiyun 	p4d = p4d_offset(pgd, 0);
1122*4882a593Smuzhiyun 	if (!p4d_none(*p4d))
1123*4882a593Smuzhiyun 		xen_cleanmfnmap_p4d(p4d, unpin);
1124*4882a593Smuzhiyun }
1125*4882a593Smuzhiyun 
xen_pagetable_p2m_free(void)1126*4882a593Smuzhiyun static void __init xen_pagetable_p2m_free(void)
1127*4882a593Smuzhiyun {
1128*4882a593Smuzhiyun 	unsigned long size;
1129*4882a593Smuzhiyun 	unsigned long addr;
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	/* No memory or already called. */
1134*4882a593Smuzhiyun 	if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
1135*4882a593Smuzhiyun 		return;
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	/* using __ka address and sticking INVALID_P2M_ENTRY! */
1138*4882a593Smuzhiyun 	memset((void *)xen_start_info->mfn_list, 0xff, size);
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	addr = xen_start_info->mfn_list;
1141*4882a593Smuzhiyun 	/*
1142*4882a593Smuzhiyun 	 * We could be in __ka space.
1143*4882a593Smuzhiyun 	 * We round up to the PMD, which means that if anybody at this stage is
1144*4882a593Smuzhiyun 	 * using the __ka address of xen_start_info or
1145*4882a593Smuzhiyun 	 * xen_start_info->shared_info they are going to crash. Fortunately
1146*4882a593Smuzhiyun 	 * we have already revectored in xen_setup_kernel_pagetable.
1147*4882a593Smuzhiyun 	 */
1148*4882a593Smuzhiyun 	size = roundup(size, PMD_SIZE);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	if (addr >= __START_KERNEL_map) {
1151*4882a593Smuzhiyun 		xen_cleanhighmap(addr, addr + size);
1152*4882a593Smuzhiyun 		size = PAGE_ALIGN(xen_start_info->nr_pages *
1153*4882a593Smuzhiyun 				  sizeof(unsigned long));
1154*4882a593Smuzhiyun 		memblock_free(__pa(addr), size);
1155*4882a593Smuzhiyun 	} else {
1156*4882a593Smuzhiyun 		xen_cleanmfnmap(addr);
1157*4882a593Smuzhiyun 	}
1158*4882a593Smuzhiyun }
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun static void __init xen_pagetable_cleanhighmap(void)
1161*4882a593Smuzhiyun {
1162*4882a593Smuzhiyun 	unsigned long size;
1163*4882a593Smuzhiyun 	unsigned long addr;
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	/* At this stage, cleanup_highmap has already cleaned __ka space
1166*4882a593Smuzhiyun 	 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1167*4882a593Smuzhiyun 	 * the ramdisk). We continue on, erasing PMD entries that point to page
1168*4882a593Smuzhiyun 	 * tables - do note that they are accessible at this stage via __va.
1169*4882a593Smuzhiyun 	 * As Xen is aligning the memory end to a 4MB boundary, for good
1170*4882a593Smuzhiyun 	 * measure we also round up to PMD_SIZE * 2 - which means that if
1171*4882a593Smuzhiyun 	 * anybody is using a __ka address to the initial boot-stack - and tries
1172*4882a593Smuzhiyun 	 * to use it - they are going to crash. The xen_start_info has been
1173*4882a593Smuzhiyun 	 * taken care of already in xen_setup_kernel_pagetable. */
1174*4882a593Smuzhiyun 	addr = xen_start_info->pt_base;
1175*4882a593Smuzhiyun 	size = xen_start_info->nr_pt_frames * PAGE_SIZE;
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
1178*4882a593Smuzhiyun 	xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun static void __init xen_pagetable_p2m_setup(void)
1182*4882a593Smuzhiyun {
1183*4882a593Smuzhiyun 	xen_vmalloc_p2m_tree();
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	xen_pagetable_p2m_free();
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	xen_pagetable_cleanhighmap();
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	/* And revector! Bye bye old array */
1190*4882a593Smuzhiyun 	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1191*4882a593Smuzhiyun }
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun static void __init xen_pagetable_init(void)
1194*4882a593Smuzhiyun {
1195*4882a593Smuzhiyun 	paging_init();
1196*4882a593Smuzhiyun 	xen_post_allocator_init();
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	xen_pagetable_p2m_setup();
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	/* Allocate and initialize top and mid mfn levels for p2m structure */
1201*4882a593Smuzhiyun 	xen_build_mfn_list_list();
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	/* Remap memory freed due to conflicts with E820 map */
1204*4882a593Smuzhiyun 	xen_remap_memory();
1205*4882a593Smuzhiyun 	xen_setup_mfn_list_list();
1206*4882a593Smuzhiyun }
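/*
 * A PV guest cannot write the real %cr2; just cache the value in the
 * shared vcpu_info so a later xen_read_cr2() returns it.
 */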
1207*4882a593Smuzhiyun static void xen_write_cr2(unsigned long cr2)
1208*4882a593Smuzhiyun {
1209*4882a593Smuzhiyun 	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1210*4882a593Smuzhiyun }
1211*4882a593Smuzhiyun 
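/*
 * Flush the local TLB with a batched MMUEXT_TLB_FLUSH_LOCAL hypercall
 * instead of reloading %cr3.
 */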
1212*4882a593Smuzhiyun static noinline void xen_flush_tlb(void)
1213*4882a593Smuzhiyun {
1214*4882a593Smuzhiyun 	struct mmuext_op *op;
1215*4882a593Smuzhiyun 	struct multicall_space mcs;
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	preempt_disable();
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	mcs = xen_mc_entry(sizeof(*op));
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	op = mcs.args;
1222*4882a593Smuzhiyun 	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1223*4882a593Smuzhiyun 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	preempt_enable();
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun static void xen_flush_tlb_one_user(unsigned long addr)
1231*4882a593Smuzhiyun {
1232*4882a593Smuzhiyun 	struct mmuext_op *op;
1233*4882a593Smuzhiyun 	struct multicall_space mcs;
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	trace_xen_mmu_flush_tlb_one_user(addr);
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	preempt_disable();
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	mcs = xen_mc_entry(sizeof(*op));
1240*4882a593Smuzhiyun 	op = mcs.args;
1241*4882a593Smuzhiyun 	op->cmd = MMUEXT_INVLPG_LOCAL;
1242*4882a593Smuzhiyun 	op->arg1.linear_addr = addr & PAGE_MASK;
1243*4882a593Smuzhiyun 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	preempt_enable();
1248*4882a593Smuzhiyun }
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun static void xen_flush_tlb_others(const struct cpumask *cpus,
1251*4882a593Smuzhiyun 				 const struct flush_tlb_info *info)
1252*4882a593Smuzhiyun {
1253*4882a593Smuzhiyun 	struct {
1254*4882a593Smuzhiyun 		struct mmuext_op op;
1255*4882a593Smuzhiyun 		DECLARE_BITMAP(mask, NR_CPUS);
1256*4882a593Smuzhiyun 	} *args;
1257*4882a593Smuzhiyun 	struct multicall_space mcs;
1258*4882a593Smuzhiyun 	const size_t mc_entry_size = sizeof(args->op) +
1259*4882a593Smuzhiyun 		sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 	trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	if (cpumask_empty(cpus))
1264*4882a593Smuzhiyun 		return;		/* nothing to do */
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	mcs = xen_mc_entry(mc_entry_size);
1267*4882a593Smuzhiyun 	args = mcs.args;
1268*4882a593Smuzhiyun 	args->op.arg2.vcpumask = to_cpumask(args->mask);
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	/* Remove us, and any offline CPUS. */
1271*4882a593Smuzhiyun 	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1272*4882a593Smuzhiyun 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1275*4882a593Smuzhiyun 	if (info->end != TLB_FLUSH_ALL &&
1276*4882a593Smuzhiyun 	    (info->end - info->start) <= PAGE_SIZE) {
1277*4882a593Smuzhiyun 		args->op.cmd = MMUEXT_INVLPG_MULTI;
1278*4882a593Smuzhiyun 		args->op.arg1.linear_addr = info->start;
1279*4882a593Smuzhiyun 	}
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1284*4882a593Smuzhiyun }
1285*4882a593Smuzhiyun 
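/*
 * The hypervisor owns the real %cr3; reads are served from the per-cpu
 * cached value kept up to date by __xen_write_cr3() below.
 */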
1286*4882a593Smuzhiyun static unsigned long xen_read_cr3(void)
1287*4882a593Smuzhiyun {
1288*4882a593Smuzhiyun 	return this_cpu_read(xen_cr3);
1289*4882a593Smuzhiyun }
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun static void set_current_cr3(void *v)
1292*4882a593Smuzhiyun {
1293*4882a593Smuzhiyun 	this_cpu_write(xen_current_cr3, (unsigned long)v);
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun static void __xen_write_cr3(bool kernel, unsigned long cr3)
1297*4882a593Smuzhiyun {
1298*4882a593Smuzhiyun 	struct mmuext_op op;
1299*4882a593Smuzhiyun 	unsigned long mfn;
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	trace_xen_mmu_write_cr3(kernel, cr3);
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	if (cr3)
1304*4882a593Smuzhiyun 		mfn = pfn_to_mfn(PFN_DOWN(cr3));
1305*4882a593Smuzhiyun 	else
1306*4882a593Smuzhiyun 		mfn = 0;
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	WARN_ON(mfn == 0 && kernel);
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1311*4882a593Smuzhiyun 	op.arg1.mfn = mfn;
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	xen_extend_mmuext_op(&op);
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	if (kernel) {
1316*4882a593Smuzhiyun 		this_cpu_write(xen_cr3, cr3);
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 		/* Update xen_current_cr3 once the batch has actually
1319*4882a593Smuzhiyun 		   been submitted. */
1320*4882a593Smuzhiyun 		xen_mc_callback(set_current_cr3, (void *)cr3);
1321*4882a593Smuzhiyun 	}
1322*4882a593Smuzhiyun }
1323*4882a593Smuzhiyun static void xen_write_cr3(unsigned long cr3)
1324*4882a593Smuzhiyun {
1325*4882a593Smuzhiyun 	pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	BUG_ON(preemptible());
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	xen_mc_batch();  /* disables interrupts */
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	/* Update while interrupts are disabled, so it's atomic with
1332*4882a593Smuzhiyun 	   respect to IPIs */
1333*4882a593Smuzhiyun 	this_cpu_write(xen_cr3, cr3);
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	__xen_write_cr3(true, cr3);
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 	if (user_pgd)
1338*4882a593Smuzhiyun 		__xen_write_cr3(false, __pa(user_pgd));
1339*4882a593Smuzhiyun 	else
1340*4882a593Smuzhiyun 		__xen_write_cr3(false, 0);
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
1343*4882a593Smuzhiyun }
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun /*
1346*4882a593Smuzhiyun  * At the start of the day - when Xen launches a guest, it has already
1347*4882a593Smuzhiyun  * built pagetables for the guest. We diligently look over them
1348*4882a593Smuzhiyun  * in xen_setup_kernel_pagetable and graft them as appropriate into the
1349*4882a593Smuzhiyun  * init_top_pgt and its friends. Then when we are happy we load
1350*4882a593Smuzhiyun  * the new init_top_pgt - and continue on.
1351*4882a593Smuzhiyun  *
1352*4882a593Smuzhiyun  * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1353*4882a593Smuzhiyun  * up the rest of the pagetables. When it has completed it loads the cr3.
1354*4882a593Smuzhiyun  * N.B. that baremetal would start at 'start_kernel' (and the early
1355*4882a593Smuzhiyun  * #PF handler would create bootstrap pagetables) - so we are running
1356*4882a593Smuzhiyun  * with the same assumptions as what to do when write_cr3 is executed
1357*4882a593Smuzhiyun  * at this point.
1358*4882a593Smuzhiyun  *
1359*4882a593Smuzhiyun  * Since there are no user-page tables at all, we have two variants
1360*4882a593Smuzhiyun  * of xen_write_cr3 - the early bootup (this one), and the late one
1361*4882a593Smuzhiyun  * (xen_write_cr3). The reason we have to do that is that in 64-bit
1362*4882a593Smuzhiyun  * the Linux kernel and user-space are both in ring 3 while the
1363*4882a593Smuzhiyun  * hypervisor is in ring 0.
1364*4882a593Smuzhiyun  */
1365*4882a593Smuzhiyun static void __init xen_write_cr3_init(unsigned long cr3)
1366*4882a593Smuzhiyun {
1367*4882a593Smuzhiyun 	BUG_ON(preemptible());
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	xen_mc_batch();  /* disables interrupts */
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 	/* Update while interrupts are disabled, so it's atomic with
1372*4882a593Smuzhiyun 	   respect to IPIs */
1373*4882a593Smuzhiyun 	this_cpu_write(xen_cr3, cr3);
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 	__xen_write_cr3(true, cr3);
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun 
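/*
 * On 64-bit PV guests kernel and user space both run in ring 3, so each
 * kernel pgd carries a companion "user" pgd (kept in page->private) that
 * is loaded via MMUEXT_NEW_USER_BASEPTR in __xen_write_cr3().
 */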
1380*4882a593Smuzhiyun static int xen_pgd_alloc(struct mm_struct *mm)
1381*4882a593Smuzhiyun {
1382*4882a593Smuzhiyun 	pgd_t *pgd = mm->pgd;
1383*4882a593Smuzhiyun 	struct page *page = virt_to_page(pgd);
1384*4882a593Smuzhiyun 	pgd_t *user_pgd;
1385*4882a593Smuzhiyun 	int ret = -ENOMEM;
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	BUG_ON(PagePinned(virt_to_page(pgd)));
1388*4882a593Smuzhiyun 	BUG_ON(page->private != 0);
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 	user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1391*4882a593Smuzhiyun 	page->private = (unsigned long)user_pgd;
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 	if (user_pgd != NULL) {
1394*4882a593Smuzhiyun #ifdef CONFIG_X86_VSYSCALL_EMULATION
1395*4882a593Smuzhiyun 		user_pgd[pgd_index(VSYSCALL_ADDR)] =
1396*4882a593Smuzhiyun 			__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1397*4882a593Smuzhiyun #endif
1398*4882a593Smuzhiyun 		ret = 0;
1399*4882a593Smuzhiyun 	}
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	return ret;
1404*4882a593Smuzhiyun }
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1407*4882a593Smuzhiyun {
1408*4882a593Smuzhiyun 	pgd_t *user_pgd = xen_get_user_pgd(pgd);
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	if (user_pgd)
1411*4882a593Smuzhiyun 		free_page((unsigned long)user_pgd);
1412*4882a593Smuzhiyun }
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun /*
1415*4882a593Smuzhiyun  * Init-time set_pte while constructing initial pagetables, which
1416*4882a593Smuzhiyun  * doesn't allow RO page table pages to be remapped RW.
1417*4882a593Smuzhiyun  *
1418*4882a593Smuzhiyun  * If there is no MFN for this PFN then this page is initially
1419*4882a593Smuzhiyun  * ballooned out so clear the PTE (as in decrease_reservation() in
1420*4882a593Smuzhiyun  * drivers/xen/balloon.c).
1421*4882a593Smuzhiyun  *
1422*4882a593Smuzhiyun  * Many of these PTE updates are done on unpinned and writable pages
1423*4882a593Smuzhiyun  * and doing a hypercall for these is unnecessary and expensive.  At
1424*4882a593Smuzhiyun  * this point it is not possible to tell if a page is pinned or not,
1425*4882a593Smuzhiyun  * so always write the PTE directly and rely on Xen trapping and
1426*4882a593Smuzhiyun  * emulating any updates as necessary.
1427*4882a593Smuzhiyun  */
1428*4882a593Smuzhiyun __visible pte_t xen_make_pte_init(pteval_t pte)
1429*4882a593Smuzhiyun {
1430*4882a593Smuzhiyun 	unsigned long pfn;
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	/*
1433*4882a593Smuzhiyun 	 * Pages belonging to the initial p2m list mapped outside the default
1434*4882a593Smuzhiyun 	 * address range must be mapped read-only. This region contains the
1435*4882a593Smuzhiyun 	 * page tables for mapping the p2m list, too, and page tables MUST be
1436*4882a593Smuzhiyun 	 * mapped read-only.
1437*4882a593Smuzhiyun 	 */
1438*4882a593Smuzhiyun 	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1439*4882a593Smuzhiyun 	if (xen_start_info->mfn_list < __START_KERNEL_map &&
1440*4882a593Smuzhiyun 	    pfn >= xen_start_info->first_p2m_pfn &&
1441*4882a593Smuzhiyun 	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1442*4882a593Smuzhiyun 		pte &= ~_PAGE_RW;
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	pte = pte_pfn_to_mfn(pte);
1445*4882a593Smuzhiyun 	return native_make_pte(pte);
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1450*4882a593Smuzhiyun {
1451*4882a593Smuzhiyun 	__xen_set_pte(ptep, pte);
1452*4882a593Smuzhiyun }
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun /* Early in boot, while setting up the initial pagetable, assume
1455*4882a593Smuzhiyun    everything is pinned. */
1456*4882a593Smuzhiyun static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1457*4882a593Smuzhiyun {
1458*4882a593Smuzhiyun #ifdef CONFIG_FLATMEM
1459*4882a593Smuzhiyun 	BUG_ON(mem_map);	/* should only be used early */
1460*4882a593Smuzhiyun #endif
1461*4882a593Smuzhiyun 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1462*4882a593Smuzhiyun 	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun /* Used for pmd and pud */
1466*4882a593Smuzhiyun static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1467*4882a593Smuzhiyun {
1468*4882a593Smuzhiyun #ifdef CONFIG_FLATMEM
1469*4882a593Smuzhiyun 	BUG_ON(mem_map);	/* should only be used early */
1470*4882a593Smuzhiyun #endif
1471*4882a593Smuzhiyun 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun /* Early release_pte assumes that all pts are pinned, since there's
1475*4882a593Smuzhiyun    only init_mm and anything attached to that is pinned. */
1476*4882a593Smuzhiyun static void __init xen_release_pte_init(unsigned long pfn)
1477*4882a593Smuzhiyun {
1478*4882a593Smuzhiyun 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1479*4882a593Smuzhiyun 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1480*4882a593Smuzhiyun }
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun static void __init xen_release_pmd_init(unsigned long pfn)
1483*4882a593Smuzhiyun {
1484*4882a593Smuzhiyun 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1485*4882a593Smuzhiyun }
1486*4882a593Smuzhiyun 
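/* Queue a single pin/unpin mmuext op for @pfn in the current multicall batch. */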
1487*4882a593Smuzhiyun static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1488*4882a593Smuzhiyun {
1489*4882a593Smuzhiyun 	struct multicall_space mcs;
1490*4882a593Smuzhiyun 	struct mmuext_op *op;
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	mcs = __xen_mc_entry(sizeof(*op));
1493*4882a593Smuzhiyun 	op = mcs.args;
1494*4882a593Smuzhiyun 	op->cmd = cmd;
1495*4882a593Smuzhiyun 	op->arg1.mfn = pfn_to_mfn(pfn);
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1498*4882a593Smuzhiyun }
1499*4882a593Smuzhiyun 
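/*
 * Queue an update_va_mapping changing the kernel mapping of @pfn to @prot;
 * used to flip page-table pages between read-only and read-write.
 */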
1500*4882a593Smuzhiyun static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1501*4882a593Smuzhiyun {
1502*4882a593Smuzhiyun 	struct multicall_space mcs;
1503*4882a593Smuzhiyun 	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 	mcs = __xen_mc_entry(0);
1506*4882a593Smuzhiyun 	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1507*4882a593Smuzhiyun 				pfn_pte(pfn, prot), 0);
1508*4882a593Smuzhiyun }
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun /* This needs to make sure the new pte page is pinned iff it's being
1511*4882a593Smuzhiyun    attached to a pinned pagetable. */
1512*4882a593Smuzhiyun static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1513*4882a593Smuzhiyun 				    unsigned level)
1514*4882a593Smuzhiyun {
1515*4882a593Smuzhiyun 	bool pinned = xen_page_pinned(mm->pgd);
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	if (pinned) {
1520*4882a593Smuzhiyun 		struct page *page = pfn_to_page(pfn);
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 		if (static_branch_likely(&xen_struct_pages_ready))
1523*4882a593Smuzhiyun 			SetPagePinned(page);
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 		xen_mc_batch();
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 		__set_pfn_prot(pfn, PAGE_KERNEL_RO);
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1530*4882a593Smuzhiyun 			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 		xen_mc_issue(PARAVIRT_LAZY_MMU);
1533*4882a593Smuzhiyun 	}
1534*4882a593Smuzhiyun }
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1537*4882a593Smuzhiyun {
1538*4882a593Smuzhiyun 	xen_alloc_ptpage(mm, pfn, PT_PTE);
1539*4882a593Smuzhiyun }
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1542*4882a593Smuzhiyun {
1543*4882a593Smuzhiyun 	xen_alloc_ptpage(mm, pfn, PT_PMD);
1544*4882a593Smuzhiyun }
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun /* This should never happen until we're OK to use struct page */
1547*4882a593Smuzhiyun static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1548*4882a593Smuzhiyun {
1549*4882a593Smuzhiyun 	struct page *page = pfn_to_page(pfn);
1550*4882a593Smuzhiyun 	bool pinned = PagePinned(page);
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun 	trace_xen_mmu_release_ptpage(pfn, level, pinned);
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun 	if (pinned) {
1555*4882a593Smuzhiyun 		xen_mc_batch();
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1558*4882a593Smuzhiyun 			__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 		__set_pfn_prot(pfn, PAGE_KERNEL);
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 		xen_mc_issue(PARAVIRT_LAZY_MMU);
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 		ClearPagePinned(page);
1565*4882a593Smuzhiyun 	}
1566*4882a593Smuzhiyun }
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun static void xen_release_pte(unsigned long pfn)
1569*4882a593Smuzhiyun {
1570*4882a593Smuzhiyun 	xen_release_ptpage(pfn, PT_PTE);
1571*4882a593Smuzhiyun }
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun static void xen_release_pmd(unsigned long pfn)
1574*4882a593Smuzhiyun {
1575*4882a593Smuzhiyun 	xen_release_ptpage(pfn, PT_PMD);
1576*4882a593Smuzhiyun }
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1579*4882a593Smuzhiyun {
1580*4882a593Smuzhiyun 	xen_alloc_ptpage(mm, pfn, PT_PUD);
1581*4882a593Smuzhiyun }
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun static void xen_release_pud(unsigned long pfn)
1584*4882a593Smuzhiyun {
1585*4882a593Smuzhiyun 	xen_release_ptpage(pfn, PT_PUD);
1586*4882a593Smuzhiyun }
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun /*
1589*4882a593Smuzhiyun  * Like __va(), but returns address in the kernel mapping (which is
1590*4882a593Smuzhiyun  * all we have until the physical memory mapping has been set up).
1591*4882a593Smuzhiyun  */
1592*4882a593Smuzhiyun static void * __init __ka(phys_addr_t paddr)
1593*4882a593Smuzhiyun {
1594*4882a593Smuzhiyun 	return (void *)(paddr + __START_KERNEL_map);
1595*4882a593Smuzhiyun }
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun /* Convert a machine address to physical address */
1598*4882a593Smuzhiyun static unsigned long __init m2p(phys_addr_t maddr)
1599*4882a593Smuzhiyun {
1600*4882a593Smuzhiyun 	phys_addr_t paddr;
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 	maddr &= XEN_PTE_MFN_MASK;
1603*4882a593Smuzhiyun 	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun 	return paddr;
1606*4882a593Smuzhiyun }
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun /* Convert a machine address to kernel virtual */
1609*4882a593Smuzhiyun static void * __init m2v(phys_addr_t maddr)
1610*4882a593Smuzhiyun {
1611*4882a593Smuzhiyun 	return __ka(m2p(maddr));
1612*4882a593Smuzhiyun }
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun /* Set the page permissions on identity-mapped pages */
1615*4882a593Smuzhiyun static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1616*4882a593Smuzhiyun 				       unsigned long flags)
1617*4882a593Smuzhiyun {
1618*4882a593Smuzhiyun 	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1619*4882a593Smuzhiyun 	pte_t pte = pfn_pte(pfn, prot);
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1622*4882a593Smuzhiyun 		BUG();
1623*4882a593Smuzhiyun }
1624*4882a593Smuzhiyun static void __init set_page_prot(void *addr, pgprot_t prot)
1625*4882a593Smuzhiyun {
1626*4882a593Smuzhiyun 	return set_page_prot_flags(addr, prot, UVMF_NONE);
1627*4882a593Smuzhiyun }
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun void __init xen_setup_machphys_mapping(void)
1630*4882a593Smuzhiyun {
1631*4882a593Smuzhiyun 	struct xen_machphys_mapping mapping;
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1634*4882a593Smuzhiyun 		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1635*4882a593Smuzhiyun 		machine_to_phys_nr = mapping.max_mfn + 1;
1636*4882a593Smuzhiyun 	} else {
1637*4882a593Smuzhiyun 		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1638*4882a593Smuzhiyun 	}
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun static void __init convert_pfn_mfn(void *v)
1642*4882a593Smuzhiyun {
1643*4882a593Smuzhiyun 	pte_t *pte = v;
1644*4882a593Smuzhiyun 	int i;
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	/* All levels are converted the same way, so just treat them
1647*4882a593Smuzhiyun 	   as ptes. */
1648*4882a593Smuzhiyun 	for (i = 0; i < PTRS_PER_PTE; i++)
1649*4882a593Smuzhiyun 		pte[i] = xen_make_pte(pte[i].pte);
1650*4882a593Smuzhiyun }
1651*4882a593Smuzhiyun static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1652*4882a593Smuzhiyun 				 unsigned long addr)
1653*4882a593Smuzhiyun {
1654*4882a593Smuzhiyun 	if (*pt_base == PFN_DOWN(__pa(addr))) {
1655*4882a593Smuzhiyun 		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1656*4882a593Smuzhiyun 		clear_page((void *)addr);
1657*4882a593Smuzhiyun 		(*pt_base)++;
1658*4882a593Smuzhiyun 	}
1659*4882a593Smuzhiyun 	if (*pt_end == PFN_DOWN(__pa(addr))) {
1660*4882a593Smuzhiyun 		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1661*4882a593Smuzhiyun 		clear_page((void *)addr);
1662*4882a593Smuzhiyun 		(*pt_end)--;
1663*4882a593Smuzhiyun 	}
1664*4882a593Smuzhiyun }
1665*4882a593Smuzhiyun /*
1666*4882a593Smuzhiyun  * Set up the initial kernel pagetable.
1667*4882a593Smuzhiyun  *
1668*4882a593Smuzhiyun  * We can construct this by grafting the Xen provided pagetable into
1669*4882a593Smuzhiyun  * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
1670*4882a593Smuzhiyun  * level2_ident_pgt, and level2_kernel_pgt.  This means that only the
1671*4882a593Smuzhiyun  * kernel has a physical mapping to start with - but that's enough to
1672*4882a593Smuzhiyun  * get __va working.  We need to fill in the rest of the physical
1673*4882a593Smuzhiyun  * mapping once some sort of allocator has been set up.
1674*4882a593Smuzhiyun  */
1675*4882a593Smuzhiyun void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1676*4882a593Smuzhiyun {
1677*4882a593Smuzhiyun 	pud_t *l3;
1678*4882a593Smuzhiyun 	pmd_t *l2;
1679*4882a593Smuzhiyun 	unsigned long addr[3];
1680*4882a593Smuzhiyun 	unsigned long pt_base, pt_end;
1681*4882a593Smuzhiyun 	unsigned i;
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun 	/* max_pfn_mapped is the last pfn mapped in the initial memory
1684*4882a593Smuzhiyun 	 * mappings. Considering that on Xen after the kernel mappings we
1685*4882a593Smuzhiyun 	 * have the mappings of some pages that don't exist in pfn space, we
1686*4882a593Smuzhiyun 	 * set max_pfn_mapped to the last real pfn mapped. */
1687*4882a593Smuzhiyun 	if (xen_start_info->mfn_list < __START_KERNEL_map)
1688*4882a593Smuzhiyun 		max_pfn_mapped = xen_start_info->first_p2m_pfn;
1689*4882a593Smuzhiyun 	else
1690*4882a593Smuzhiyun 		max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 	pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1693*4882a593Smuzhiyun 	pt_end = pt_base + xen_start_info->nr_pt_frames;
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 	/* Zap identity mapping */
1696*4882a593Smuzhiyun 	init_top_pgt[0] = __pgd(0);
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 	/* Pre-constructed entries are in pfn, so convert to mfn */
1699*4882a593Smuzhiyun 	/* L4[273] -> level3_ident_pgt  */
1700*4882a593Smuzhiyun 	/* L4[511] -> level3_kernel_pgt */
1701*4882a593Smuzhiyun 	convert_pfn_mfn(init_top_pgt);
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	/* L3_i[0] -> level2_ident_pgt */
1704*4882a593Smuzhiyun 	convert_pfn_mfn(level3_ident_pgt);
1705*4882a593Smuzhiyun 	/* L3_k[510] -> level2_kernel_pgt */
1706*4882a593Smuzhiyun 	/* L3_k[511] -> level2_fixmap_pgt */
1707*4882a593Smuzhiyun 	convert_pfn_mfn(level3_kernel_pgt);
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	/* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
1710*4882a593Smuzhiyun 	convert_pfn_mfn(level2_fixmap_pgt);
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 	/* We get [511][511] and have Xen's version of level2_kernel_pgt */
1713*4882a593Smuzhiyun 	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1714*4882a593Smuzhiyun 	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1715*4882a593Smuzhiyun 
1716*4882a593Smuzhiyun 	addr[0] = (unsigned long)pgd;
1717*4882a593Smuzhiyun 	addr[1] = (unsigned long)l3;
1718*4882a593Smuzhiyun 	addr[2] = (unsigned long)l2;
1719*4882a593Smuzhiyun 	/* Graft it onto L4[273][0]. Note that we are creating an aliasing problem:
1720*4882a593Smuzhiyun 	 * Both L4[273][0] and L4[511][510] have entries that point to the same
1721*4882a593Smuzhiyun 	 * L2 (PMD) tables. Meaning that if you modify it in __va space
1722*4882a593Smuzhiyun 	 * it will also be modified in the __ka space! (But if you just
1723*4882a593Smuzhiyun 	 * modify the PMD table to point to other PTEs or none, then you
1724*4882a593Smuzhiyun 	 * are OK - which is what cleanup_highmap does) */
1725*4882a593Smuzhiyun 	copy_page(level2_ident_pgt, l2);
1726*4882a593Smuzhiyun 	/* Graft it onto L4[511][510] */
1727*4882a593Smuzhiyun 	copy_page(level2_kernel_pgt, l2);
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	/*
1730*4882a593Smuzhiyun 	 * Zap execute permission from the ident map. Due to the sharing of
1731*4882a593Smuzhiyun 	 * L1 entries we need to do this in the L2.
1732*4882a593Smuzhiyun 	 */
1733*4882a593Smuzhiyun 	if (__supported_pte_mask & _PAGE_NX) {
1734*4882a593Smuzhiyun 		for (i = 0; i < PTRS_PER_PMD; ++i) {
1735*4882a593Smuzhiyun 			if (pmd_none(level2_ident_pgt[i]))
1736*4882a593Smuzhiyun 				continue;
1737*4882a593Smuzhiyun 			level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
1738*4882a593Smuzhiyun 		}
1739*4882a593Smuzhiyun 	}
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun 	/* Copy the initial P->M table mappings if necessary. */
1742*4882a593Smuzhiyun 	i = pgd_index(xen_start_info->mfn_list);
1743*4882a593Smuzhiyun 	if (i && i < pgd_index(__START_KERNEL_map))
1744*4882a593Smuzhiyun 		init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun 	/* Make pagetable pieces RO */
1747*4882a593Smuzhiyun 	set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
1748*4882a593Smuzhiyun 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1749*4882a593Smuzhiyun 	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1750*4882a593Smuzhiyun 	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1751*4882a593Smuzhiyun 	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1752*4882a593Smuzhiyun 	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1753*4882a593Smuzhiyun 	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 	for (i = 0; i < FIXMAP_PMD_NUM; i++) {
1756*4882a593Smuzhiyun 		set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
1757*4882a593Smuzhiyun 			      PAGE_KERNEL_RO);
1758*4882a593Smuzhiyun 	}
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	/* Pin down new L4 */
1761*4882a593Smuzhiyun 	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1762*4882a593Smuzhiyun 			  PFN_DOWN(__pa_symbol(init_top_pgt)));
1763*4882a593Smuzhiyun 
1764*4882a593Smuzhiyun 	/* Unpin Xen-provided one */
1765*4882a593Smuzhiyun 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun 	/*
1768*4882a593Smuzhiyun 	 * At this stage there can be no user pgd, and no page structure to
1769*4882a593Smuzhiyun 	 * attach it to, so make sure we just set kernel pgd.
1770*4882a593Smuzhiyun 	 */
1771*4882a593Smuzhiyun 	xen_mc_batch();
1772*4882a593Smuzhiyun 	__xen_write_cr3(true, __pa(init_top_pgt));
1773*4882a593Smuzhiyun 	xen_mc_issue(PARAVIRT_LAZY_CPU);
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	/* We can't easily rip out L3 and L2, as the Xen pagetables are
1776*4882a593Smuzhiyun 	 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ...  for
1777*4882a593Smuzhiyun 	 * the initial domain. For guests using the toolstack, they are in
1778*4882a593Smuzhiyun 	 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1779*4882a593Smuzhiyun 	 * rip out the [L4] (pgd), but for guests we shave off three pages.
1780*4882a593Smuzhiyun 	 */
1781*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(addr); i++)
1782*4882a593Smuzhiyun 		check_pt_base(&pt_base, &pt_end, addr[i]);
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun 	/* Our (by three pages) smaller Xen pagetable that we are using */
1785*4882a593Smuzhiyun 	xen_pt_base = PFN_PHYS(pt_base);
1786*4882a593Smuzhiyun 	xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
1787*4882a593Smuzhiyun 	memblock_reserve(xen_pt_base, xen_pt_size);
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	/* Revector the xen_start_info */
1790*4882a593Smuzhiyun 	xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1791*4882a593Smuzhiyun }
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun /*
1794*4882a593Smuzhiyun  * Read a value from a physical address.
1795*4882a593Smuzhiyun  */
1796*4882a593Smuzhiyun static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
1797*4882a593Smuzhiyun {
1798*4882a593Smuzhiyun 	unsigned long *vaddr;
1799*4882a593Smuzhiyun 	unsigned long val;
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun 	vaddr = early_memremap_ro(addr, sizeof(val));
1802*4882a593Smuzhiyun 	val = *vaddr;
1803*4882a593Smuzhiyun 	early_memunmap(vaddr, sizeof(val));
1804*4882a593Smuzhiyun 	return val;
1805*4882a593Smuzhiyun }
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun /*
1808*4882a593Smuzhiyun  * Translate a virtual address to a physical one without relying on mapped
1809*4882a593Smuzhiyun  * page tables. Don't rely on big pages being aligned in (guest) physical
1810*4882a593Smuzhiyun  * space!
1811*4882a593Smuzhiyun  */
1812*4882a593Smuzhiyun static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
1813*4882a593Smuzhiyun {
1814*4882a593Smuzhiyun 	phys_addr_t pa;
1815*4882a593Smuzhiyun 	pgd_t pgd;
1816*4882a593Smuzhiyun 	pud_t pud;
1817*4882a593Smuzhiyun 	pmd_t pmd;
1818*4882a593Smuzhiyun 	pte_t pte;
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	pa = read_cr3_pa();
1821*4882a593Smuzhiyun 	pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
1822*4882a593Smuzhiyun 						       sizeof(pgd)));
1823*4882a593Smuzhiyun 	if (!pgd_present(pgd))
1824*4882a593Smuzhiyun 		return 0;
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 	pa = pgd_val(pgd) & PTE_PFN_MASK;
1827*4882a593Smuzhiyun 	pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
1828*4882a593Smuzhiyun 						       sizeof(pud)));
1829*4882a593Smuzhiyun 	if (!pud_present(pud))
1830*4882a593Smuzhiyun 		return 0;
1831*4882a593Smuzhiyun 	pa = pud_val(pud) & PTE_PFN_MASK;
1832*4882a593Smuzhiyun 	if (pud_large(pud))
1833*4882a593Smuzhiyun 		return pa + (vaddr & ~PUD_MASK);
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun 	pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
1836*4882a593Smuzhiyun 						       sizeof(pmd)));
1837*4882a593Smuzhiyun 	if (!pmd_present(pmd))
1838*4882a593Smuzhiyun 		return 0;
1839*4882a593Smuzhiyun 	pa = pmd_val(pmd) & PTE_PFN_MASK;
1840*4882a593Smuzhiyun 	if (pmd_large(pmd))
1841*4882a593Smuzhiyun 		return pa + (vaddr & ~PMD_MASK);
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun 	pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
1844*4882a593Smuzhiyun 						       sizeof(pte)));
1845*4882a593Smuzhiyun 	if (!pte_present(pte))
1846*4882a593Smuzhiyun 		return 0;
1847*4882a593Smuzhiyun 	pa = pte_pfn(pte) << PAGE_SHIFT;
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 	return pa | (vaddr & ~PAGE_MASK);
1850*4882a593Smuzhiyun }
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun /*
1853*4882a593Smuzhiyun  * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
1854*4882a593Smuzhiyun  * this area.
1855*4882a593Smuzhiyun  */
1856*4882a593Smuzhiyun void __init xen_relocate_p2m(void)
1857*4882a593Smuzhiyun {
1858*4882a593Smuzhiyun 	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
1859*4882a593Smuzhiyun 	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
1860*4882a593Smuzhiyun 	int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
1861*4882a593Smuzhiyun 	pte_t *pt;
1862*4882a593Smuzhiyun 	pmd_t *pmd;
1863*4882a593Smuzhiyun 	pud_t *pud;
1864*4882a593Smuzhiyun 	pgd_t *pgd;
1865*4882a593Smuzhiyun 	unsigned long *new_p2m;
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1868*4882a593Smuzhiyun 	n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
1869*4882a593Smuzhiyun 	n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
1870*4882a593Smuzhiyun 	n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
1871*4882a593Smuzhiyun 	n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
1872*4882a593Smuzhiyun 	n_frames = n_pte + n_pt + n_pmd + n_pud;
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun 	new_area = xen_find_free_area(PFN_PHYS(n_frames));
1875*4882a593Smuzhiyun 	if (!new_area) {
1876*4882a593Smuzhiyun 		xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
1877*4882a593Smuzhiyun 		BUG();
1878*4882a593Smuzhiyun 	}
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 	/*
1881*4882a593Smuzhiyun 	 * Setup the page tables for addressing the new p2m list.
1882*4882a593Smuzhiyun 	 * We have asked the hypervisor to map the p2m list at the user address
1883*4882a593Smuzhiyun 	 * PUD_SIZE. It may have done so, or it may have used a kernel space
1884*4882a593Smuzhiyun 	 * address depending on the Xen version.
1885*4882a593Smuzhiyun 	 * To avoid any possible virtual address collision, just use
1886*4882a593Smuzhiyun 	 * 2 * PUD_SIZE for the new area.
1887*4882a593Smuzhiyun 	 */
1888*4882a593Smuzhiyun 	pud_phys = new_area;
1889*4882a593Smuzhiyun 	pmd_phys = pud_phys + PFN_PHYS(n_pud);
1890*4882a593Smuzhiyun 	pt_phys = pmd_phys + PFN_PHYS(n_pmd);
1891*4882a593Smuzhiyun 	p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
1892*4882a593Smuzhiyun 
1893*4882a593Smuzhiyun 	pgd = __va(read_cr3_pa());
1894*4882a593Smuzhiyun 	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
1895*4882a593Smuzhiyun 	for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
1896*4882a593Smuzhiyun 		pud = early_memremap(pud_phys, PAGE_SIZE);
1897*4882a593Smuzhiyun 		clear_page(pud);
1898*4882a593Smuzhiyun 		for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
1899*4882a593Smuzhiyun 				idx_pmd++) {
1900*4882a593Smuzhiyun 			pmd = early_memremap(pmd_phys, PAGE_SIZE);
1901*4882a593Smuzhiyun 			clear_page(pmd);
1902*4882a593Smuzhiyun 			for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
1903*4882a593Smuzhiyun 					idx_pt++) {
1904*4882a593Smuzhiyun 				pt = early_memremap(pt_phys, PAGE_SIZE);
1905*4882a593Smuzhiyun 				clear_page(pt);
1906*4882a593Smuzhiyun 				for (idx_pte = 0;
1907*4882a593Smuzhiyun 				     idx_pte < min(n_pte, PTRS_PER_PTE);
1908*4882a593Smuzhiyun 				     idx_pte++) {
1909*4882a593Smuzhiyun 					pt[idx_pte] = pfn_pte(p2m_pfn,
1910*4882a593Smuzhiyun 							      PAGE_KERNEL);
1911*4882a593Smuzhiyun 					p2m_pfn++;
1912*4882a593Smuzhiyun 				}
1913*4882a593Smuzhiyun 				n_pte -= PTRS_PER_PTE;
1914*4882a593Smuzhiyun 				early_memunmap(pt, PAGE_SIZE);
1915*4882a593Smuzhiyun 				make_lowmem_page_readonly(__va(pt_phys));
1916*4882a593Smuzhiyun 				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
1917*4882a593Smuzhiyun 						PFN_DOWN(pt_phys));
1918*4882a593Smuzhiyun 				pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
1919*4882a593Smuzhiyun 				pt_phys += PAGE_SIZE;
1920*4882a593Smuzhiyun 			}
1921*4882a593Smuzhiyun 			n_pt -= PTRS_PER_PMD;
1922*4882a593Smuzhiyun 			early_memunmap(pmd, PAGE_SIZE);
1923*4882a593Smuzhiyun 			make_lowmem_page_readonly(__va(pmd_phys));
1924*4882a593Smuzhiyun 			pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
1925*4882a593Smuzhiyun 					PFN_DOWN(pmd_phys));
1926*4882a593Smuzhiyun 			pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
1927*4882a593Smuzhiyun 			pmd_phys += PAGE_SIZE;
1928*4882a593Smuzhiyun 		}
1929*4882a593Smuzhiyun 		n_pmd -= PTRS_PER_PUD;
1930*4882a593Smuzhiyun 		early_memunmap(pud, PAGE_SIZE);
1931*4882a593Smuzhiyun 		make_lowmem_page_readonly(__va(pud_phys));
1932*4882a593Smuzhiyun 		pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
1933*4882a593Smuzhiyun 		set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
1934*4882a593Smuzhiyun 		pud_phys += PAGE_SIZE;
1935*4882a593Smuzhiyun 	}
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	/* Now copy the old p2m info to the new area. */
1938*4882a593Smuzhiyun 	memcpy(new_p2m, xen_p2m_addr, size);
1939*4882a593Smuzhiyun 	xen_p2m_addr = new_p2m;
1940*4882a593Smuzhiyun 
1941*4882a593Smuzhiyun 	/* Release the old p2m list and set new list info. */
1942*4882a593Smuzhiyun 	p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
1943*4882a593Smuzhiyun 	BUG_ON(!p2m_pfn);
1944*4882a593Smuzhiyun 	p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 	if (xen_start_info->mfn_list < __START_KERNEL_map) {
1947*4882a593Smuzhiyun 		pfn = xen_start_info->first_p2m_pfn;
1948*4882a593Smuzhiyun 		pfn_end = xen_start_info->first_p2m_pfn +
1949*4882a593Smuzhiyun 			  xen_start_info->nr_p2m_frames;
1950*4882a593Smuzhiyun 		set_pgd(pgd + 1, __pgd(0));
1951*4882a593Smuzhiyun 	} else {
1952*4882a593Smuzhiyun 		pfn = p2m_pfn;
1953*4882a593Smuzhiyun 		pfn_end = p2m_pfn_end;
1954*4882a593Smuzhiyun 	}
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun 	memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
1957*4882a593Smuzhiyun 	while (pfn < pfn_end) {
1958*4882a593Smuzhiyun 		if (pfn == p2m_pfn) {
1959*4882a593Smuzhiyun 			pfn = p2m_pfn_end;
1960*4882a593Smuzhiyun 			continue;
1961*4882a593Smuzhiyun 		}
1962*4882a593Smuzhiyun 		make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1963*4882a593Smuzhiyun 		pfn++;
1964*4882a593Smuzhiyun 	}
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun 	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1967*4882a593Smuzhiyun 	xen_start_info->first_p2m_pfn =  PFN_DOWN(new_area);
1968*4882a593Smuzhiyun 	xen_start_info->nr_p2m_frames = n_frames;
1969*4882a593Smuzhiyun }
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun void __init xen_reserve_special_pages(void)
1972*4882a593Smuzhiyun {
1973*4882a593Smuzhiyun 	phys_addr_t paddr;
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
1976*4882a593Smuzhiyun 	if (xen_start_info->store_mfn) {
1977*4882a593Smuzhiyun 		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
1978*4882a593Smuzhiyun 		memblock_reserve(paddr, PAGE_SIZE);
1979*4882a593Smuzhiyun 	}
1980*4882a593Smuzhiyun 	if (!xen_initial_domain()) {
1981*4882a593Smuzhiyun 		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
1982*4882a593Smuzhiyun 		memblock_reserve(paddr, PAGE_SIZE);
1983*4882a593Smuzhiyun 	}
1984*4882a593Smuzhiyun }
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun void __init xen_pt_check_e820(void)
1987*4882a593Smuzhiyun {
1988*4882a593Smuzhiyun 	if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
1989*4882a593Smuzhiyun 		xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
1990*4882a593Smuzhiyun 		BUG();
1991*4882a593Smuzhiyun 	}
1992*4882a593Smuzhiyun }
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
1995*4882a593Smuzhiyun 
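/*
 * Install a fixmap entry.  Guest-local pages use pfn_pte(), hardware and
 * hypervisor mappings use mfn_pte(), and the APIC slots are pointed at
 * dummy_mapping since a PV guest has no direct APIC access.
 */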
1996*4882a593Smuzhiyun static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1997*4882a593Smuzhiyun {
1998*4882a593Smuzhiyun 	pte_t pte;
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	phys >>= PAGE_SHIFT;
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun 	switch (idx) {
2003*4882a593Smuzhiyun 	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2004*4882a593Smuzhiyun #ifdef CONFIG_X86_VSYSCALL_EMULATION
2005*4882a593Smuzhiyun 	case VSYSCALL_PAGE:
2006*4882a593Smuzhiyun #endif
2007*4882a593Smuzhiyun 		/* All local page mappings */
2008*4882a593Smuzhiyun 		pte = pfn_pte(phys, prot);
2009*4882a593Smuzhiyun 		break;
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun #ifdef CONFIG_X86_LOCAL_APIC
2012*4882a593Smuzhiyun 	case FIX_APIC_BASE:	/* maps dummy local APIC */
2013*4882a593Smuzhiyun 		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2014*4882a593Smuzhiyun 		break;
2015*4882a593Smuzhiyun #endif
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun #ifdef CONFIG_X86_IO_APIC
2018*4882a593Smuzhiyun 	case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2019*4882a593Smuzhiyun 		/*
2020*4882a593Smuzhiyun 		 * We just don't map the IO APIC - all access is via
2021*4882a593Smuzhiyun 		 * hypercalls.  Keep the address in the pte for reference.
2022*4882a593Smuzhiyun 		 */
2023*4882a593Smuzhiyun 		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2024*4882a593Smuzhiyun 		break;
2025*4882a593Smuzhiyun #endif
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	case FIX_PARAVIRT_BOOTMAP:
2028*4882a593Smuzhiyun 		/* This is an MFN, but it isn't an IO mapping from the
2029*4882a593Smuzhiyun 		   IO domain */
2030*4882a593Smuzhiyun 		pte = mfn_pte(phys, prot);
2031*4882a593Smuzhiyun 		break;
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 	default:
2034*4882a593Smuzhiyun 		/* By default, set_fixmap is used for hardware mappings */
2035*4882a593Smuzhiyun 		pte = mfn_pte(phys, prot);
2036*4882a593Smuzhiyun 		break;
2037*4882a593Smuzhiyun 	}
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	__native_set_fixmap(idx, pte);
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun #ifdef CONFIG_X86_VSYSCALL_EMULATION
2042*4882a593Smuzhiyun 	/* Replicate changes to map the vsyscall page into the user
2043*4882a593Smuzhiyun 	   pagetable vsyscall mapping. */
2044*4882a593Smuzhiyun 	if (idx == VSYSCALL_PAGE) {
2045*4882a593Smuzhiyun 		unsigned long vaddr = __fix_to_virt(idx);
2046*4882a593Smuzhiyun 		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2047*4882a593Smuzhiyun 	}
2048*4882a593Smuzhiyun #endif
2049*4882a593Smuzhiyun }
2050*4882a593Smuzhiyun 
2051*4882a593Smuzhiyun static void __init xen_post_allocator_init(void)
2052*4882a593Smuzhiyun {
2053*4882a593Smuzhiyun 	pv_ops.mmu.set_pte = xen_set_pte;
2054*4882a593Smuzhiyun 	pv_ops.mmu.set_pmd = xen_set_pmd;
2055*4882a593Smuzhiyun 	pv_ops.mmu.set_pud = xen_set_pud;
2056*4882a593Smuzhiyun 	pv_ops.mmu.set_p4d = xen_set_p4d;
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 	/* This will work as long as patching hasn't happened yet
2059*4882a593Smuzhiyun 	   (which it hasn't) */
2060*4882a593Smuzhiyun 	pv_ops.mmu.alloc_pte = xen_alloc_pte;
2061*4882a593Smuzhiyun 	pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
2062*4882a593Smuzhiyun 	pv_ops.mmu.release_pte = xen_release_pte;
2063*4882a593Smuzhiyun 	pv_ops.mmu.release_pmd = xen_release_pmd;
2064*4882a593Smuzhiyun 	pv_ops.mmu.alloc_pud = xen_alloc_pud;
2065*4882a593Smuzhiyun 	pv_ops.mmu.release_pud = xen_release_pud;
2066*4882a593Smuzhiyun 	pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun 	pv_ops.mmu.write_cr3 = &xen_write_cr3;
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun 
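/* Flush any multicalls queued while in lazy MMU mode before leaving it. */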
2071*4882a593Smuzhiyun static void xen_leave_lazy_mmu(void)
2072*4882a593Smuzhiyun {
2073*4882a593Smuzhiyun 	preempt_disable();
2074*4882a593Smuzhiyun 	xen_mc_flush();
2075*4882a593Smuzhiyun 	paravirt_leave_lazy_mmu();
2076*4882a593Smuzhiyun 	preempt_enable();
2077*4882a593Smuzhiyun }
2078*4882a593Smuzhiyun 
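/*
 * Boot-time MMU ops.  The *_init variants are swapped for the final
 * implementations in xen_post_allocator_init() once the memory allocator
 * and struct pages are available.
 */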
2079*4882a593Smuzhiyun static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2080*4882a593Smuzhiyun 	.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2),
2081*4882a593Smuzhiyun 	.write_cr2 = xen_write_cr2,
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	.read_cr3 = xen_read_cr3,
2084*4882a593Smuzhiyun 	.write_cr3 = xen_write_cr3_init,
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 	.flush_tlb_user = xen_flush_tlb,
2087*4882a593Smuzhiyun 	.flush_tlb_kernel = xen_flush_tlb,
2088*4882a593Smuzhiyun 	.flush_tlb_one_user = xen_flush_tlb_one_user,
2089*4882a593Smuzhiyun 	.flush_tlb_others = xen_flush_tlb_others,
2090*4882a593Smuzhiyun 	.tlb_remove_table = tlb_remove_table,
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	.pgd_alloc = xen_pgd_alloc,
2093*4882a593Smuzhiyun 	.pgd_free = xen_pgd_free,
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	.alloc_pte = xen_alloc_pte_init,
2096*4882a593Smuzhiyun 	.release_pte = xen_release_pte_init,
2097*4882a593Smuzhiyun 	.alloc_pmd = xen_alloc_pmd_init,
2098*4882a593Smuzhiyun 	.release_pmd = xen_release_pmd_init,
2099*4882a593Smuzhiyun 
2100*4882a593Smuzhiyun 	.set_pte = xen_set_pte_init,
2101*4882a593Smuzhiyun 	.set_pmd = xen_set_pmd_hyper,
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun 	.ptep_modify_prot_start = __ptep_modify_prot_start,
2104*4882a593Smuzhiyun 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
2107*4882a593Smuzhiyun 	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 	.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2110*4882a593Smuzhiyun 	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 	.set_pud = xen_set_pud_hyper,
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2115*4882a593Smuzhiyun 	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun 	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
2118*4882a593Smuzhiyun 	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
2119*4882a593Smuzhiyun 	.set_p4d = xen_set_p4d_hyper,
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun 	.alloc_pud = xen_alloc_pmd_init,
2122*4882a593Smuzhiyun 	.release_pud = xen_release_pmd_init,
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun #if CONFIG_PGTABLE_LEVELS >= 5
2125*4882a593Smuzhiyun 	.p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
2126*4882a593Smuzhiyun 	.make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
2127*4882a593Smuzhiyun #endif
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 	.activate_mm = xen_activate_mm,
2130*4882a593Smuzhiyun 	.dup_mmap = xen_dup_mmap,
2131*4882a593Smuzhiyun 	.exit_mmap = xen_exit_mmap,
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun 	.lazy_mode = {
2134*4882a593Smuzhiyun 		.enter = paravirt_enter_lazy_mmu,
2135*4882a593Smuzhiyun 		.leave = xen_leave_lazy_mmu,
2136*4882a593Smuzhiyun 		.flush = paravirt_flush_lazy_mmu,
2137*4882a593Smuzhiyun 	},
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	.set_fixmap = xen_set_fixmap,
2140*4882a593Smuzhiyun };
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun void __init xen_init_mmu_ops(void)
2143*4882a593Smuzhiyun {
2144*4882a593Smuzhiyun 	x86_init.paging.pagetable_init = xen_pagetable_init;
2145*4882a593Smuzhiyun 	x86_init.hyper.init_after_bootmem = xen_after_bootmem;
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun 	pv_ops.mmu = xen_mmu_ops;
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun 	memset(dummy_mapping, 0xff, PAGE_SIZE);
2150*4882a593Smuzhiyun }
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun /* Protected by xen_reservation_lock. */
2153*4882a593Smuzhiyun #define MAX_CONTIG_ORDER 9 /* 2MB */
2154*4882a593Smuzhiyun static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun #define VOID_PTE (mfn_pte(0, __pgprot(0)))
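/*
 * Clear the PTEs backing a 2^order page range, recording the old MFNs
 * (in_frames) and/or the backing PFNs (out_frames) so the underlying
 * machine pages can be handed to XENMEM_exchange.
 */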
2157*4882a593Smuzhiyun static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2158*4882a593Smuzhiyun 				unsigned long *in_frames,
2159*4882a593Smuzhiyun 				unsigned long *out_frames)
2160*4882a593Smuzhiyun {
2161*4882a593Smuzhiyun 	int i;
2162*4882a593Smuzhiyun 	struct multicall_space mcs;
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun 	xen_mc_batch();
2165*4882a593Smuzhiyun 	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2166*4882a593Smuzhiyun 		mcs = __xen_mc_entry(0);
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun 		if (in_frames)
2169*4882a593Smuzhiyun 			in_frames[i] = virt_to_mfn(vaddr);
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun 		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2172*4882a593Smuzhiyun 		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 		if (out_frames)
2175*4882a593Smuzhiyun 			out_frames[i] = virt_to_pfn(vaddr);
2176*4882a593Smuzhiyun 	}
2177*4882a593Smuzhiyun 	xen_mc_issue(0);
2178*4882a593Smuzhiyun }
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun /*
2181*4882a593Smuzhiyun  * Update the pfn-to-mfn mappings for a virtual address range, either to
2182*4882a593Smuzhiyun  * point to an array of mfns, or contiguously from a single starting
2183*4882a593Smuzhiyun  * mfn.
2184*4882a593Smuzhiyun  */
2185*4882a593Smuzhiyun static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2186*4882a593Smuzhiyun 				     unsigned long *mfns,
2187*4882a593Smuzhiyun 				     unsigned long first_mfn)
2188*4882a593Smuzhiyun {
2189*4882a593Smuzhiyun 	unsigned i, limit;
2190*4882a593Smuzhiyun 	unsigned long mfn;
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 	xen_mc_batch();
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun 	limit = 1u << order;
2195*4882a593Smuzhiyun 	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2196*4882a593Smuzhiyun 		struct multicall_space mcs;
2197*4882a593Smuzhiyun 		unsigned flags;
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun 		mcs = __xen_mc_entry(0);
2200*4882a593Smuzhiyun 		if (mfns)
2201*4882a593Smuzhiyun 			mfn = mfns[i];
2202*4882a593Smuzhiyun 		else
2203*4882a593Smuzhiyun 			mfn = first_mfn + i;
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun 		if (i < (limit - 1))
2206*4882a593Smuzhiyun 			flags = 0;
2207*4882a593Smuzhiyun 		else {
2208*4882a593Smuzhiyun 			if (order == 0)
2209*4882a593Smuzhiyun 				flags = UVMF_INVLPG | UVMF_ALL;
2210*4882a593Smuzhiyun 			else
2211*4882a593Smuzhiyun 				flags = UVMF_TLB_FLUSH | UVMF_ALL;
2212*4882a593Smuzhiyun 		}
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 		MULTI_update_va_mapping(mcs.mc, vaddr,
2215*4882a593Smuzhiyun 				mfn_pte(mfn, PAGE_KERNEL), flags);
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2218*4882a593Smuzhiyun 	}
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun 	xen_mc_issue(0);
2221*4882a593Smuzhiyun }
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun /*
2224*4882a593Smuzhiyun  * Perform the hypercall to exchange a region of our pfns to point to
2225*4882a593Smuzhiyun  * memory with the required contiguous alignment.  Takes the pfns as
2226*4882a593Smuzhiyun  * input, and populates mfns as output.
2227*4882a593Smuzhiyun  *
2228*4882a593Smuzhiyun  * Returns a success code indicating whether the hypervisor was able to
2229*4882a593Smuzhiyun  * satisfy the request or not.
2230*4882a593Smuzhiyun  */
2231*4882a593Smuzhiyun static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2232*4882a593Smuzhiyun 			       unsigned long *pfns_in,
2233*4882a593Smuzhiyun 			       unsigned long extents_out,
2234*4882a593Smuzhiyun 			       unsigned int order_out,
2235*4882a593Smuzhiyun 			       unsigned long *mfns_out,
2236*4882a593Smuzhiyun 			       unsigned int address_bits)
2237*4882a593Smuzhiyun {
2238*4882a593Smuzhiyun 	long rc;
2239*4882a593Smuzhiyun 	int success;
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	struct xen_memory_exchange exchange = {
2242*4882a593Smuzhiyun 		.in = {
2243*4882a593Smuzhiyun 			.nr_extents   = extents_in,
2244*4882a593Smuzhiyun 			.extent_order = order_in,
2245*4882a593Smuzhiyun 			.extent_start = pfns_in,
2246*4882a593Smuzhiyun 			.domid        = DOMID_SELF
2247*4882a593Smuzhiyun 		},
2248*4882a593Smuzhiyun 		.out = {
2249*4882a593Smuzhiyun 			.nr_extents   = extents_out,
2250*4882a593Smuzhiyun 			.extent_order = order_out,
2251*4882a593Smuzhiyun 			.extent_start = mfns_out,
2252*4882a593Smuzhiyun 			.address_bits = address_bits,
2253*4882a593Smuzhiyun 			.domid        = DOMID_SELF
2254*4882a593Smuzhiyun 		}
2255*4882a593Smuzhiyun 	};
2256*4882a593Smuzhiyun 
2257*4882a593Smuzhiyun 	BUG_ON(extents_in << order_in != extents_out << order_out);
2258*4882a593Smuzhiyun 
2259*4882a593Smuzhiyun 	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2260*4882a593Smuzhiyun 	success = (exchange.nr_exchanged == extents_in);
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun 	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2263*4882a593Smuzhiyun 	BUG_ON(success && (rc != 0));
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun 	return success;
2266*4882a593Smuzhiyun }
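/*
 * Illustrative sketch (not part of the original file): the two exchange
 * shapes used by the callers below; both satisfy the invariant checked
 * above, extents_in << order_in == extents_out << order_out.
 *
 *   Gather: 2^order single-page extents in, one order-sized extent out,
 *   limited to address_bits (see xen_create_contiguous_region()):
 *
 *	success = xen_exchange_memory(1UL << order, 0, in_frames,
 *				      1, order, &out_frame, address_bits);
 *
 *   Scatter: one order-sized extent in, 2^order single pages out
 *   (see xen_destroy_contiguous_region()):
 *
 *	success = xen_exchange_memory(1, order, &in_frame,
 *				      1UL << order, 0, out_frames, 0);
 */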
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
2269*4882a593Smuzhiyun 				 unsigned int address_bits,
2270*4882a593Smuzhiyun 				 dma_addr_t *dma_handle)
2271*4882a593Smuzhiyun {
2272*4882a593Smuzhiyun 	unsigned long *in_frames = discontig_frames, out_frame;
2273*4882a593Smuzhiyun 	unsigned long  flags;
2274*4882a593Smuzhiyun 	int            success;
2275*4882a593Smuzhiyun 	unsigned long vstart = (unsigned long)phys_to_virt(pstart);
2276*4882a593Smuzhiyun 
2277*4882a593Smuzhiyun 	/*
2278*4882a593Smuzhiyun 	 * Currently an auto-translated guest will not perform I/O, nor will
2279*4882a593Smuzhiyun 	 * it require PAE page directories below 4GB. Therefore any calls to
2280*4882a593Smuzhiyun 	 * this function are redundant and can be ignored.
2281*4882a593Smuzhiyun 	 */
2282*4882a593Smuzhiyun 
2283*4882a593Smuzhiyun 	if (unlikely(order > MAX_CONTIG_ORDER))
2284*4882a593Smuzhiyun 		return -ENOMEM;
2285*4882a593Smuzhiyun 
2286*4882a593Smuzhiyun 	memset((void *) vstart, 0, PAGE_SIZE << order);
2287*4882a593Smuzhiyun 
2288*4882a593Smuzhiyun 	spin_lock_irqsave(&xen_reservation_lock, flags);
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun 	/* 1. Zap current PTEs, remembering MFNs. */
2291*4882a593Smuzhiyun 	xen_zap_pfn_range(vstart, order, in_frames, NULL);
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun 	/* 2. Get a new contiguous memory extent. */
2294*4882a593Smuzhiyun 	out_frame = virt_to_pfn(vstart);
2295*4882a593Smuzhiyun 	success = xen_exchange_memory(1UL << order, 0, in_frames,
2296*4882a593Smuzhiyun 				      1, order, &out_frame,
2297*4882a593Smuzhiyun 				      address_bits);
2298*4882a593Smuzhiyun 
2299*4882a593Smuzhiyun 	/* 3. Map the new extent in place of old pages. */
2300*4882a593Smuzhiyun 	if (success)
2301*4882a593Smuzhiyun 		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2302*4882a593Smuzhiyun 	else
2303*4882a593Smuzhiyun 		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2304*4882a593Smuzhiyun 
2305*4882a593Smuzhiyun 	spin_unlock_irqrestore(&xen_reservation_lock, flags);
2306*4882a593Smuzhiyun 
2307*4882a593Smuzhiyun 	*dma_handle = virt_to_machine(vstart).maddr;
2308*4882a593Smuzhiyun 	return success ? 0 : -ENOMEM;
2309*4882a593Smuzhiyun }
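/*
 * Illustrative usage sketch (not part of the original file): making a
 * freshly allocated buffer machine-contiguous and reachable with 32-bit
 * addresses for a legacy DMA device.  The helper name is hypothetical;
 * in-tree callers such as the Xen swiotlb code follow the same pattern.
 */
static int example_make_dma_buffer(size_t size, dma_addr_t *dma_handle)
{
	unsigned int order = get_order(size);
	void *buf = (void *)__get_free_pages(GFP_KERNEL, order);
	int rc;

	if (!buf)
		return -ENOMEM;

	/* Exchange the backing frames for one contiguous machine extent. */
	rc = xen_create_contiguous_region(virt_to_phys(buf), order,
					  32, dma_handle);
	if (rc) {
		free_pages((unsigned long)buf, order);
		return rc;
	}

	/* *dma_handle now holds the machine address of the first page. */
	return 0;
}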
2310*4882a593Smuzhiyun 
2311*4882a593Smuzhiyun void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
2312*4882a593Smuzhiyun {
2313*4882a593Smuzhiyun 	unsigned long *out_frames = discontig_frames, in_frame;
2314*4882a593Smuzhiyun 	unsigned long  flags;
2315*4882a593Smuzhiyun 	int success;
2316*4882a593Smuzhiyun 	unsigned long vstart;
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun 	if (unlikely(order > MAX_CONTIG_ORDER))
2319*4882a593Smuzhiyun 		return;
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun 	vstart = (unsigned long)phys_to_virt(pstart);
2322*4882a593Smuzhiyun 	memset((void *) vstart, 0, PAGE_SIZE << order);
2323*4882a593Smuzhiyun 
2324*4882a593Smuzhiyun 	spin_lock_irqsave(&xen_reservation_lock, flags);
2325*4882a593Smuzhiyun 
2326*4882a593Smuzhiyun 	/* 1. Find start MFN of contiguous extent. */
2327*4882a593Smuzhiyun 	in_frame = virt_to_mfn(vstart);
2328*4882a593Smuzhiyun 
2329*4882a593Smuzhiyun 	/* 2. Zap current PTEs. */
2330*4882a593Smuzhiyun 	xen_zap_pfn_range(vstart, order, NULL, out_frames);
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 	/* 3. Do the exchange for non-contiguous MFNs. */
2333*4882a593Smuzhiyun 	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2334*4882a593Smuzhiyun 					0, out_frames, 0);
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	/* 4. Map new pages in place of old pages. */
2337*4882a593Smuzhiyun 	if (success)
2338*4882a593Smuzhiyun 		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2339*4882a593Smuzhiyun 	else
2340*4882a593Smuzhiyun 		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2341*4882a593Smuzhiyun 
2342*4882a593Smuzhiyun 	spin_unlock_irqrestore(&xen_reservation_lock, flags);
2343*4882a593Smuzhiyun }
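/*
 * Illustrative teardown counterpart (not part of the original file) to the
 * sketch after xen_create_contiguous_region() above: the contiguous extent
 * is broken back up into arbitrary frames before the pages are returned to
 * the page allocator.
 */
static void example_free_dma_buffer(void *buf, size_t size)
{
	unsigned int order = get_order(size);

	xen_destroy_contiguous_region(virt_to_phys(buf), order);
	free_pages((unsigned long)buf, order);
}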
2344*4882a593Smuzhiyun 
2345*4882a593Smuzhiyun static noinline void xen_flush_tlb_all(void)
2346*4882a593Smuzhiyun {
2347*4882a593Smuzhiyun 	struct mmuext_op *op;
2348*4882a593Smuzhiyun 	struct multicall_space mcs;
2349*4882a593Smuzhiyun 
2350*4882a593Smuzhiyun 	preempt_disable();
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun 	mcs = xen_mc_entry(sizeof(*op));
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun 	op = mcs.args;
2355*4882a593Smuzhiyun 	op->cmd = MMUEXT_TLB_FLUSH_ALL;
2356*4882a593Smuzhiyun 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun 	xen_mc_issue(PARAVIRT_LAZY_MMU);
2359*4882a593Smuzhiyun 
2360*4882a593Smuzhiyun 	preempt_enable();
2361*4882a593Smuzhiyun }
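/*
 * Illustrative note (not part of the original file): MMUEXT_TLB_FLUSH_ALL
 * flushes the TLBs of every vCPU in the domain with a single mmuext op.
 * xen_remap_pfn() below relies on this one flush at the end instead of
 * flushing after each batch of PTE updates.
 */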
2362*4882a593Smuzhiyun 
2363*4882a593Smuzhiyun #define REMAP_BATCH_SIZE 16
2364*4882a593Smuzhiyun 
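/*
 * Illustrative note (not part of the original file): remap_data carries the
 * state for one apply_to_page_range() walk.  @pfn either holds the next
 * frame to map (contiguous case, the value is incremented) or walks an
 * array of frames (the pointer itself is advanced); @mmu_update is the
 * cursor into the caller's per-batch update buffer.
 */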
2365*4882a593Smuzhiyun struct remap_data {
2366*4882a593Smuzhiyun 	xen_pfn_t *pfn;
2367*4882a593Smuzhiyun 	bool contiguous;
2368*4882a593Smuzhiyun 	bool no_translate;
2369*4882a593Smuzhiyun 	pgprot_t prot;
2370*4882a593Smuzhiyun 	struct mmu_update *mmu_update;
2371*4882a593Smuzhiyun };
2372*4882a593Smuzhiyun 
2373*4882a593Smuzhiyun static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
2374*4882a593Smuzhiyun {
2375*4882a593Smuzhiyun 	struct remap_data *rmd = data;
2376*4882a593Smuzhiyun 	pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun 	/*
2379*4882a593Smuzhiyun 	 * If we have a contiguous range, just update the pfn itself,
2380*4882a593Smuzhiyun 	 * else update pointer to be "next pfn".
2381*4882a593Smuzhiyun 	 */
2382*4882a593Smuzhiyun 	if (rmd->contiguous)
2383*4882a593Smuzhiyun 		(*rmd->pfn)++;
2384*4882a593Smuzhiyun 	else
2385*4882a593Smuzhiyun 		rmd->pfn++;
2386*4882a593Smuzhiyun 
2387*4882a593Smuzhiyun 	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2388*4882a593Smuzhiyun 	rmd->mmu_update->ptr |= rmd->no_translate ?
2389*4882a593Smuzhiyun 		MMU_PT_UPDATE_NO_TRANSLATE :
2390*4882a593Smuzhiyun 		MMU_NORMAL_PT_UPDATE;
2391*4882a593Smuzhiyun 	rmd->mmu_update->val = pte_val_ma(pte);
2392*4882a593Smuzhiyun 	rmd->mmu_update++;
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun 	return 0;
2395*4882a593Smuzhiyun }
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
2398*4882a593Smuzhiyun 		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
2399*4882a593Smuzhiyun 		  unsigned int domid, bool no_translate, struct page **pages)
2400*4882a593Smuzhiyun {
2401*4882a593Smuzhiyun 	int err = 0;
2402*4882a593Smuzhiyun 	struct remap_data rmd;
2403*4882a593Smuzhiyun 	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2404*4882a593Smuzhiyun 	unsigned long range;
2405*4882a593Smuzhiyun 	int mapped = 0;
2406*4882a593Smuzhiyun 
2407*4882a593Smuzhiyun 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun 	rmd.pfn = pfn;
2410*4882a593Smuzhiyun 	rmd.prot = prot;
2411*4882a593Smuzhiyun 	/*
2412*4882a593Smuzhiyun 	 * We use err_ptr to indicate whether we are doing a contiguous
2413*4882a593Smuzhiyun 	 * mapping or a discontiguous mapping.
2414*4882a593Smuzhiyun 	 */
2415*4882a593Smuzhiyun 	rmd.contiguous = !err_ptr;
2416*4882a593Smuzhiyun 	rmd.no_translate = no_translate;
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun 	while (nr) {
2419*4882a593Smuzhiyun 		int index = 0;
2420*4882a593Smuzhiyun 		int done = 0;
2421*4882a593Smuzhiyun 		int batch = min(REMAP_BATCH_SIZE, nr);
2422*4882a593Smuzhiyun 		int batch_left = batch;
2423*4882a593Smuzhiyun 
2424*4882a593Smuzhiyun 		range = (unsigned long)batch << PAGE_SHIFT;
2425*4882a593Smuzhiyun 
2426*4882a593Smuzhiyun 		rmd.mmu_update = mmu_update;
2427*4882a593Smuzhiyun 		err = apply_to_page_range(vma->vm_mm, addr, range,
2428*4882a593Smuzhiyun 					  remap_area_pfn_pte_fn, &rmd);
2429*4882a593Smuzhiyun 		if (err)
2430*4882a593Smuzhiyun 			goto out;
2431*4882a593Smuzhiyun 
2432*4882a593Smuzhiyun 		/*
2433*4882a593Smuzhiyun 		 * We record the error for each page that fails, but
2434*4882a593Smuzhiyun 		 * continue mapping until the whole set is done.
2435*4882a593Smuzhiyun 		 */
2436*4882a593Smuzhiyun 		do {
2437*4882a593Smuzhiyun 			int i;
2438*4882a593Smuzhiyun 
2439*4882a593Smuzhiyun 			err = HYPERVISOR_mmu_update(&mmu_update[index],
2440*4882a593Smuzhiyun 						    batch_left, &done, domid);
2441*4882a593Smuzhiyun 
2442*4882a593Smuzhiyun 			/*
2443*4882a593Smuzhiyun 			 * @err_ptr may be the same buffer as @pfn, so
2444*4882a593Smuzhiyun 			 * only clear it after each chunk of @pfn is
2445*4882a593Smuzhiyun 			 * used.
2446*4882a593Smuzhiyun 			 */
2447*4882a593Smuzhiyun 			if (err_ptr) {
2448*4882a593Smuzhiyun 				for (i = index; i < index + done; i++)
2449*4882a593Smuzhiyun 					err_ptr[i] = 0;
2450*4882a593Smuzhiyun 			}
2451*4882a593Smuzhiyun 			if (err < 0) {
2452*4882a593Smuzhiyun 				if (!err_ptr)
2453*4882a593Smuzhiyun 					goto out;
2454*4882a593Smuzhiyun 				err_ptr[i] = err;
2455*4882a593Smuzhiyun 				done++; /* Skip failed frame. */
2456*4882a593Smuzhiyun 			} else
2457*4882a593Smuzhiyun 				mapped += done;
2458*4882a593Smuzhiyun 			batch_left -= done;
2459*4882a593Smuzhiyun 			index += done;
2460*4882a593Smuzhiyun 		} while (batch_left);
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun 		nr -= batch;
2463*4882a593Smuzhiyun 		addr += range;
2464*4882a593Smuzhiyun 		if (err_ptr)
2465*4882a593Smuzhiyun 			err_ptr += batch;
2466*4882a593Smuzhiyun 		cond_resched();
2467*4882a593Smuzhiyun 	}
2468*4882a593Smuzhiyun out:
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun 	xen_flush_tlb_all();
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun 	return err < 0 ? err : mapped;
2473*4882a593Smuzhiyun }
2474*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(xen_remap_pfn);
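/*
 * Illustrative usage sketch (not part of the original file): mapping @nr
 * foreign frames from @domid into a userspace VMA and collecting per-frame
 * errors.  The VMA must already be marked VM_IO | VM_PFNMAP (see the
 * BUG_ON above).  The helper name is hypothetical; the in-tree
 * xen_remap_domain_*() wrappers follow this shape.
 */
static int example_map_foreign_frames(struct vm_area_struct *vma,
				      unsigned long addr, xen_pfn_t *gfns,
				      int nr, int *errs, unsigned int domid,
				      struct page **pages)
{
	/*
	 * A non-NULL err_ptr selects the "array of frames" mode; the
	 * return value is the number of frames mapped or a negative error.
	 */
	return xen_remap_pfn(vma, addr, gfns, nr, errs, vma->vm_page_prot,
			     domid, false, pages);
}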
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun #ifdef CONFIG_KEXEC_CORE
2477*4882a593Smuzhiyun phys_addr_t paddr_vmcoreinfo_note(void)
2478*4882a593Smuzhiyun {
2479*4882a593Smuzhiyun 	if (xen_pv_domain())
2480*4882a593Smuzhiyun 		return virt_to_machine(vmcoreinfo_note).maddr;
2481*4882a593Smuzhiyun 	else
2482*4882a593Smuzhiyun 		return __pa(vmcoreinfo_note);
2483*4882a593Smuzhiyun }
2484*4882a593Smuzhiyun #endif /* CONFIG_KEXEC_CORE */
2485