xref: /OK3568_Linux_fs/kernel/include/xen/xen-ops.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef INCLUDE_XEN_OPS_H
3*4882a593Smuzhiyun #define INCLUDE_XEN_OPS_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/percpu.h>
6*4882a593Smuzhiyun #include <linux/notifier.h>
7*4882a593Smuzhiyun #include <linux/efi.h>
8*4882a593Smuzhiyun #include <xen/features.h>
9*4882a593Smuzhiyun #include <asm/xen/interface.h>
10*4882a593Smuzhiyun #include <xen/interface/vcpu.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
/* Map a Linux CPU number to the Xen vCPU id recorded for it in xen_vcpu_id. */
static inline uint32_t xen_vcpu_nr(int cpu)
{
	return per_cpu(xen_vcpu_id, cpu);
}
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #define XEN_VCPU_ID_INVALID U32_MAX
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun void xen_arch_pre_suspend(void);
23*4882a593Smuzhiyun void xen_arch_post_suspend(int suspend_cancelled);
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun void xen_timer_resume(void);
26*4882a593Smuzhiyun void xen_arch_resume(void);
27*4882a593Smuzhiyun void xen_arch_suspend(void);
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun void xen_reboot(int reason);
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun void xen_resume_notifier_register(struct notifier_block *nb);
32*4882a593Smuzhiyun void xen_resume_notifier_unregister(struct notifier_block *nb);
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun bool xen_vcpu_stolen(int vcpu);
35*4882a593Smuzhiyun void xen_setup_runstate_info(int cpu);
36*4882a593Smuzhiyun void xen_time_setup_guest(void);
37*4882a593Smuzhiyun void xen_manage_runstate_time(int action);
38*4882a593Smuzhiyun void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
39*4882a593Smuzhiyun u64 xen_steal_clock(int cpu);
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun int xen_setup_shutdown_event(void);
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun extern unsigned long *xen_contiguous_bitmap;
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
46*4882a593Smuzhiyun int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
47*4882a593Smuzhiyun 				unsigned int address_bits,
48*4882a593Smuzhiyun 				dma_addr_t *dma_handle);
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
51*4882a593Smuzhiyun #else
/*
 * Stub used when none of CONFIG_XEN_PV / CONFIG_ARM / CONFIG_ARM64 is set:
 * there is nothing to do, so report success.
 * NOTE(review): *dma_handle is left untouched by this stub — presumably
 * callers only consume it in configurations with the real implementation.
 */
static inline int xen_create_contiguous_region(phys_addr_t pstart,
					       unsigned int order,
					       unsigned int address_bits,
					       dma_addr_t *dma_handle)
{
	return 0;
}
59*4882a593Smuzhiyun 
/* No-op counterpart of the xen_create_contiguous_region() stub above. */
static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
						 unsigned int order) { }
62*4882a593Smuzhiyun #endif
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun #if defined(CONFIG_XEN_PV)
65*4882a593Smuzhiyun int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
66*4882a593Smuzhiyun 		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
67*4882a593Smuzhiyun 		  unsigned int domid, bool no_translate, struct page **pages);
68*4882a593Smuzhiyun #else
/*
 * Stub for !CONFIG_XEN_PV builds: remapping foreign frames by pfn is only
 * meaningful for PV guests, so reaching this stub is a programming error,
 * hence BUG().  The return statement only exists to satisfy the compiler.
 */
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
				xen_pfn_t *pfn, int nr, int *err_ptr,
				pgprot_t prot,  unsigned int domid,
				bool no_translate, struct page **pages)
{
	BUG();
	return 0;
}
77*4882a593Smuzhiyun #endif
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun struct vm_area_struct;
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun #ifdef CONFIG_XEN_AUTO_XLATE
82*4882a593Smuzhiyun int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
83*4882a593Smuzhiyun 			      unsigned long addr,
84*4882a593Smuzhiyun 			      xen_pfn_t *gfn, int nr,
85*4882a593Smuzhiyun 			      int *err_ptr, pgprot_t prot,
86*4882a593Smuzhiyun 			      unsigned int domid,
87*4882a593Smuzhiyun 			      struct page **pages);
88*4882a593Smuzhiyun int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
89*4882a593Smuzhiyun 			      int nr, struct page **pages);
90*4882a593Smuzhiyun #else
91*4882a593Smuzhiyun /*
92*4882a593Smuzhiyun  * These two functions are called from arch/x86/xen/mmu.c and so stubs
93*4882a593Smuzhiyun  * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE.
94*4882a593Smuzhiyun  */
/* Stub: operation not supported without CONFIG_XEN_AUTO_XLATE. */
static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
					    unsigned long addr,
					    xen_pfn_t *gfn, int nr,
					    int *err_ptr, pgprot_t prot,
					    unsigned int domid,
					    struct page **pages)
{
	return -EOPNOTSUPP;
}
104*4882a593Smuzhiyun 
/* Stub: operation not supported without CONFIG_XEN_AUTO_XLATE. */
static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
					    int nr, struct page **pages)
{
	return -EOPNOTSUPP;
}
110*4882a593Smuzhiyun #endif
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
113*4882a593Smuzhiyun 			unsigned long len);
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun /*
116*4882a593Smuzhiyun  * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
117*4882a593Smuzhiyun  * @vma:     VMA to map the pages into
118*4882a593Smuzhiyun  * @addr:    Address at which to map the pages
119*4882a593Smuzhiyun  * @gfn:     Array of GFNs to map
 * @nr:      Number of entries in the GFN array
121*4882a593Smuzhiyun  * @err_ptr: Returns per-GFN error status.
122*4882a593Smuzhiyun  * @prot:    page protection mask
123*4882a593Smuzhiyun  * @domid:   Domain owning the pages
124*4882a593Smuzhiyun  * @pages:   Array of pages if this domain has an auto-translated physmap
125*4882a593Smuzhiyun  *
126*4882a593Smuzhiyun  * @gfn and @err_ptr may point to the same buffer, the GFNs will be
127*4882a593Smuzhiyun  * overwritten by the error codes after they are mapped.
128*4882a593Smuzhiyun  *
129*4882a593Smuzhiyun  * Returns the number of successfully mapped frames, or a -ve error
130*4882a593Smuzhiyun  * code.
131*4882a593Smuzhiyun  */
static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
					     unsigned long addr,
					     xen_pfn_t *gfn, int nr,
					     int *err_ptr, pgprot_t prot,
					     unsigned int domid,
					     struct page **pages)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/*
		 * A NULL err_ptr is a programmer error: without per-frame
		 * error reporting it becomes very hard to diagnose later why
		 * the wrong memory was mapped in.  Fail loudly right here.
		 */
		BUG_ON(err_ptr == NULL);
		return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot,
				     domid, false, pages);
	}

	/* Auto-translated guests go through the xlate helper instead. */
	return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
					 prot, domid, pages);
}
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun /*
153*4882a593Smuzhiyun  * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
154*4882a593Smuzhiyun  * @vma:     VMA to map the pages into
155*4882a593Smuzhiyun  * @addr:    Address at which to map the pages
156*4882a593Smuzhiyun  * @mfn:     Array of MFNs to map
 * @nr:      Number of entries in the MFN array
158*4882a593Smuzhiyun  * @err_ptr: Returns per-MFN error status.
159*4882a593Smuzhiyun  * @prot:    page protection mask
160*4882a593Smuzhiyun  * @domid:   Domain owning the pages
161*4882a593Smuzhiyun  * @pages:   Array of pages if this domain has an auto-translated physmap
162*4882a593Smuzhiyun  *
163*4882a593Smuzhiyun  * @mfn and @err_ptr may point to the same buffer, the MFNs will be
164*4882a593Smuzhiyun  * overwritten by the error codes after they are mapped.
165*4882a593Smuzhiyun  *
166*4882a593Smuzhiyun  * Returns the number of successfully mapped frames, or a -ve error
167*4882a593Smuzhiyun  * code.
168*4882a593Smuzhiyun  */
static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
					     unsigned long addr, xen_pfn_t *mfn,
					     int nr, int *err_ptr,
					     pgprot_t prot, unsigned int domid,
					     struct page **pages)
{
	/*
	 * Mapping by machine frame number is refused for auto-translated
	 * guests; otherwise hand off to xen_remap_pfn() with translation
	 * disabled (no_translate = true).
	 */
	return xen_feature(XENFEAT_auto_translated_physmap) ?
		-EOPNOTSUPP :
		xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
			      true, pages);
}
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun /* xen_remap_domain_gfn_range() - map a range of foreign frames
183*4882a593Smuzhiyun  * @vma:     VMA to map the pages into
184*4882a593Smuzhiyun  * @addr:    Address at which to map the pages
185*4882a593Smuzhiyun  * @gfn:     First GFN to map.
 * @nr:      Number of frames to map
187*4882a593Smuzhiyun  * @prot:    page protection mask
188*4882a593Smuzhiyun  * @domid:   Domain owning the pages
189*4882a593Smuzhiyun  * @pages:   Array of pages if this domain has an auto-translated physmap
190*4882a593Smuzhiyun  *
191*4882a593Smuzhiyun  * Returns the number of successfully mapped frames, or a -ve error
192*4882a593Smuzhiyun  * code.
193*4882a593Smuzhiyun  */
static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
					     unsigned long addr,
					     xen_pfn_t gfn, int nr,
					     pgprot_t prot, unsigned int domid,
					     struct page **pages)
{
	/* Range mapping is not implemented for auto-translated guests. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	/* &gfn passes the first frame; err_ptr is NULL for the range form. */
	return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
			     pages);
}
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
208*4882a593Smuzhiyun 			       int numpgs, struct page **pages);
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
211*4882a593Smuzhiyun 				  unsigned long nr_grant_frames);
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun void xen_efi_runtime_setup(void);
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun #if defined(CONFIG_XEN_PV) && !defined(CONFIG_PREEMPTION)
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
221*4882a593Smuzhiyun 
/* Raise the per-CPU xen_in_preemptible_hcall flag on entry to a
 * preemptible hypercall region. */
static inline void xen_preemptible_hcall_begin(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, true);
}
226*4882a593Smuzhiyun 
/* Clear the per-CPU xen_in_preemptible_hcall flag on exit from the region. */
static inline void xen_preemptible_hcall_end(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, false);
}
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun #else
233*4882a593Smuzhiyun 
/* No-op stubs when not (CONFIG_XEN_PV && !CONFIG_PREEMPTION). */
static inline void xen_preemptible_hcall_begin(void) { }
static inline void xen_preemptible_hcall_end(void) { }
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun #endif /* INCLUDE_XEN_OPS_H */
240