xref: /OK3568_Linux_fs/kernel/include/xen/arm/page.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_ARM_XEN_PAGE_H
3*4882a593Smuzhiyun #define _ASM_ARM_XEN_PAGE_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <asm/page.h>
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/pfn.h>
8*4882a593Smuzhiyun #include <linux/types.h>
9*4882a593Smuzhiyun #include <linux/dma-mapping.h>
10*4882a593Smuzhiyun #include <linux/pgtable.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <xen/xen.h>
13*4882a593Smuzhiyun #include <xen/interface/grant_table.h>
14*4882a593Smuzhiyun 
/*
 * On ARM every pseudo-physical frame is considered to have a valid
 * machine mapping, so this check is constant-true.
 */
#define phys_to_machine_mapping_valid(pfn) (1)

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

/* Wrap a raw address in the corresponding typed struct. */
#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/* Sentinel used by the p2m helpers below to mean "no mapping recorded". */
#define INVALID_P2M_ENTRY      (~0UL)
31*4882a593Smuzhiyun 
/*
 * The pseudo-physical frame (pfn) used in all the helpers is always based
 * on Xen page granularity (i.e. 4KB).
 *
 * A Linux page may be split across multiple non-contiguous Xen pages, so we
 * have to keep track of frames based on 4KB page granularity.
 *
 * PV drivers should never make direct use of these helpers (particularly
 * pfn_to_gfn and gfn_to_pfn).
 */
42*4882a593Smuzhiyun 
/*
 * Look up the machine frame recorded for @pfn; returns INVALID_P2M_ENTRY
 * when no entry exists (defined outside this header).
 */
unsigned long __pfn_to_mfn(unsigned long pfn);
/* Root of the pfn -> mfn lookup tree; empty when no mappings are recorded. */
extern struct rb_root phys_to_mach;
46*4882a593Smuzhiyun /* Pseudo-physical <-> Guest conversion */
/* Pseudo-physical -> guest frame: the identity mapping on ARM. */
static inline unsigned long pfn_to_gfn(unsigned long pfn)
{
	unsigned long gfn = pfn;

	return gfn;
}
51*4882a593Smuzhiyun 
/* Guest -> pseudo-physical frame: the identity mapping on ARM. */
static inline unsigned long gfn_to_pfn(unsigned long gfn)
{
	unsigned long pfn = gfn;

	return pfn;
}
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun /* Pseudo-physical <-> BUS conversion */
pfn_to_bfn(unsigned long pfn)58*4882a593Smuzhiyun static inline unsigned long pfn_to_bfn(unsigned long pfn)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun 	unsigned long mfn;
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 	if (phys_to_mach.rb_node != NULL) {
63*4882a593Smuzhiyun 		mfn = __pfn_to_mfn(pfn);
64*4882a593Smuzhiyun 		if (mfn != INVALID_P2M_ENTRY)
65*4882a593Smuzhiyun 			return mfn;
66*4882a593Smuzhiyun 	}
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	return pfn;
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun 
/* Bus -> pseudo-physical frame: the identity mapping on ARM. */
static inline unsigned long bfn_to_pfn(unsigned long bfn)
{
	unsigned long pfn = bfn;

	return pfn;
}
75*4882a593Smuzhiyun 
/* On ARM a local pfn and a bus frame are interchangeable. */
#define bfn_to_local_pfn(bfn)	bfn_to_pfn(bfn)

/* VIRT <-> GUEST conversion */
/*
 * Translate a linear-mapped kernel virtual address to a guest frame
 * number (Xen 4KB granularity), warning once on an invalid address.
 */
#define virt_to_gfn(v)                                                         \
	({                                                                     \
		WARN_ON_ONCE(!virt_addr_valid(v));                              \
		pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT);                 \
	})
#define gfn_to_virt(m)		(__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))

/* Translate a per-cpu pointer to the guest frame backing it. */
#define percpu_to_gfn(v)	\
	(pfn_to_gfn(per_cpu_ptr_to_phys(v) >> XEN_PAGE_SHIFT))
/* Only used in PV code. But ARM guests are always HVM. */
static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	/* Must never be reached on ARM; crash loudly if it is. */
	BUG();
}
94*4882a593Smuzhiyun 
/*
 * Record / clear grant-table foreign mappings for @count pages
 * (implemented outside this header).
 */
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);

extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_unmap_grant_ref *kunmap_ops,
				     struct page **pages, unsigned int count);

/* Insert a pfn -> mfn translation for one frame / a range of frames. */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
		unsigned long nr_pages);
106*4882a593Smuzhiyun 
set_phys_to_machine(unsigned long pfn,unsigned long mfn)107*4882a593Smuzhiyun static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun 	return __set_phys_to_machine(pfn, mfn);
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun 
/* Map/unmap a shared page range as cacheable kernel memory. */
#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))

/*
 * NOTE(review): presumably decides whether DMA between @phys and
 * @dev_addr must bounce through swiotlb — confirm against the definition.
 */
bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr);
unsigned long xen_get_swiotlb_free_pages(unsigned int order);
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun #endif /* _ASM_ARM_XEN_PAGE_H */
121