xref: /OK3568_Linux_fs/kernel/arch/x86/include/asm/xen/page.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/device.h>

#include <asm/extable.h>
#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#ifdef CONFIG_X86_64
#define XEN_PHYSICAL_MASK	__sme_clr((1UL << 52) - 1)
#else
#define XEN_PHYSICAL_MASK	__PHYSICAL_MASK
#endif

#define XEN_PTE_MFN_MASK	((pteval_t)(((signed long)PAGE_MASK) & \
					    XEN_PHYSICAL_MASK))

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT	(1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)

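/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how the indicator bits above are meant to be read.  A raw p2m entry keeps
 * the mfn in its low bits and may have FOREIGN_FRAME_BIT and/or
 * IDENTITY_FRAME_BIT set in the top two bits.  The mfn value 0x1234 below is
 * an arbitrary example:
 *
 *	unsigned long entry = FOREIGN_FRAME(0x1234);	// grant-mapped frame
 *
 *	entry & FOREIGN_FRAME_BIT;			// non-zero: foreign page
 *	entry & ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);	// plain mfn 0x1234
 *
 * Callers normally never mask by hand; pfn_to_mfn() below does it for them.
 */
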
#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))

extern unsigned long *machine_to_phys_mapping;
extern unsigned long  machine_to_phys_nr;
extern unsigned long *xen_p2m_addr;
extern unsigned long  xen_p2m_size;
extern unsigned long  xen_max_p2m_pfn;

extern int xen_alloc_p2m_entry(unsigned long pfn);

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
						    unsigned long pfn_e);

#ifdef CONFIG_XEN_PV
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_unmap_grant_ref *kunmap_ops,
				     struct page **pages, unsigned int count);
#else
static inline int
set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			struct gnttab_map_grant_ref *kmap_ops,
			struct page **pages, unsigned int count)
{
	return 0;
}

static inline int
clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			  struct gnttab_unmap_grant_ref *kunmap_ops,
			  struct page **pages, unsigned int count)
{
	return 0;
}
#endif

/*
 * Helper functions to write or read unsigned long values to/from
 * memory, when the access may fault.
 */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
	int ret = 0;

	asm volatile("1: mov %[val], %[ptr]\n"
		     "2:\n"
		     ".section .fixup, \"ax\"\n"
		     "3: sub $1, %[ret]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [ret] "+r" (ret), [ptr] "=m" (*addr)
		     : [val] "r" (val));

	return ret;
}

static inline int xen_safe_read_ulong(const unsigned long *addr,
				      unsigned long *val)
{
	int ret = 0;
	unsigned long rval = ~0ul;

	asm volatile("1: mov %[ptr], %[rval]\n"
		     "2:\n"
		     ".section .fixup, \"ax\"\n"
		     "3: sub $1, %[ret]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [ret] "+r" (ret), [rval] "+r" (rval)
		     : [ptr] "m" (*addr));
	*val = rval;

	return ret;
}
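
/*
 * Usage sketch (editor's addition): both helpers return 0 on success and a
 * negative value if the access faulted, so callers probe possibly unmapped
 * table slots like this:
 *
 *	unsigned long pfn;
 *
 *	if (xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn) < 0)
 *		pfn = ~0;	// treat a faulting m2p slot as invalid
 *
 * mfn_to_pfn_no_overrides() below is the in-file user of this pattern.
 */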

#ifdef CONFIG_XEN_PV
/*
 * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
 * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
 *   bits (identity or foreign) are set.
 * - __pfn_to_mfn() returns the raw entry found in the p2m table; any identity
 *   or foreign indicator bit remains set. __pfn_to_mfn() encapsulates
 *   get_phys_to_machine(), which is called in special cases only.
 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in the
 *   special cases that need extended handling.
 */
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn < xen_p2m_size)
		mfn = xen_p2m_addr[pfn];
	else if (unlikely(pfn < xen_max_p2m_pfn))
		return get_phys_to_machine(pfn);
	else
		return IDENTITY_FRAME(pfn);

	if (unlikely(mfn == INVALID_P2M_ENTRY))
		return get_phys_to_machine(pfn);

	return mfn;
}
#else
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	return pfn;
}
#endif

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	/*
	 * Some x86 code is still using pfn_to_mfn instead of
	 * pfn_to_gfn. This will have to be removed once we figure
	 * out which calls need which.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	mfn = __pfn_to_mfn(pfn);

	if (mfn != INVALID_P2M_ENTRY)
		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

	return mfn;
}

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
	unsigned long pfn;
	int ret;

	if (unlikely(mfn >= machine_to_phys_nr))
		return ~0;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
	if (ret < 0)
		return ~0;

	return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	/*
	 * Some x86 code is still using mfn_to_pfn instead of
	 * gfn_to_pfn. This will have to be removed once we figure
	 * out which calls need which.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		pfn = ~0;

	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or the
	 * entry doesn't map back to the mfn.
	 */
	if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
		pfn = mfn;

	return pfn;
}
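
/*
 * Illustrative sketch (editor's addition): the m2p table is global and may
 * describe frames owned by other domains, so a reverse lookup is only
 * trusted when it round-trips through the local p2m:
 *
 *	unsigned long pfn = mfn_to_pfn(mfn);
 *
 *	if (pfn != ~0UL)
 *		// either p2m(m2p(mfn)) == mfn held (the frame is ours), or
 *		// the frame is identity-mapped I/O space; otherwise ~0UL.
 *
 * pfn_to_mfn() is the forward direction and additionally strips the
 * FOREIGN_FRAME_BIT/IDENTITY_FRAME_BIT indicators.
 */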

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
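
/*
 * Worked example (editor's addition): only the frame number is translated;
 * the offset within the page is carried over unchanged.  With 4 KiB pages
 * and assuming, purely for illustration, that pfn_to_mfn(0x1000) == 0x2345:
 *
 *	phys_to_machine(XPADDR(0x1000a10)).maddr == 0x2345a10
 *
 * i.e. pfn 0x1000 becomes mfn 0x2345 while offset 0xa10 is preserved.
 */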

/* Pseudo-physical <-> Guest conversion */
static inline unsigned long pfn_to_gfn(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;
	else
		return pfn_to_mfn(pfn);
}

static inline unsigned long gfn_to_pfn(unsigned long gfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return gfn;
	else
		return mfn_to_pfn(gfn);
}

/* Pseudo-physical <-> Bus conversion */
#define pfn_to_bfn(pfn)		pfn_to_gfn(pfn)
#define bfn_to_pfn(bfn)		gfn_to_pfn(bfn)

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		return -1; /* force !pfn_valid() */
	return pfn;
}

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

/* VIRT <-> GUEST conversion */
#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_pfn(v)))
#define gfn_to_virt(g)		(__va(gfn_to_pfn(g) << PAGE_SHIFT))
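
/*
 * Usage sketch (editor's addition): addresses handed to the hypervisor or to
 * other domains must be guest frame numbers, not kernel virtual addresses.
 * A typical pattern when sharing a directly-mapped kernel page looks roughly
 * like:
 *
 *	void *page = (void *)get_zeroed_page(GFP_KERNEL);
 *	unsigned long gfn = virt_to_gfn(page);
 *
 * On a PV guest this resolves to the machine frame via pfn_to_mfn(); on an
 * auto-translated (HVM/PVH) guest it is simply the pfn.
 */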

static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			massage_pgprot(pgprot);

	return pte;
}

static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}
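
/*
 * Illustrative sketch (editor's addition, assumed usage): the _ma variants
 * operate on "machine" PTEs, i.e. PTEs whose frame field holds an mfn rather
 * than a pfn.  Building and inspecting one looks roughly like:
 *
 *	pte_t pte = mfn_pte(mfn, PAGE_KERNEL);
 *	pteval_t raw = pte_val_ma(pte);		// no mfn -> pfn translation
 *	unsigned long frame = pte_mfn(pte);	// recovers the mfn
 *
 * whereas the generic pte_val()/pte_pfn() would translate through the p2m
 * on a PV guest.
 */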

#define pmd_val_ma(v) ((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v) ((v).p4d.pgd.pgd)
#else
#define pud_val_ma(v) ((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) } )

#ifdef __PAGETABLE_P4D_FOLDED
#define p4d_val_ma(x)	((x).pgd.pgd)
#else
#define p4d_val_ma(x)	((x).p4d)
#endif

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#define xen_remap(cookie, size) ioremap((cookie), (size));
#define xen_unmap(cookie) iounmap((cookie))

static inline bool xen_arch_need_swiotlb(struct device *dev,
					 phys_addr_t phys,
					 dma_addr_t dev_addr)
{
	return false;
}

static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	return __get_free_pages(__GFP_NOWARN, order);
}

#endif /* _ASM_X86_XEN_PAGE_H */