// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_MAX_LEVELS		4U

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);
	if (parange > ID_AA64MMFR0_PARANGE_MAX)
		parange = ID_AA64MMFR0_PARANGE_MAX;

	return parange;
}

typedef u64 kvm_pte_t;

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:	Allocate a single zeroed memory page. The @arg parameter
 *			can be used by the walker to pass a memcache. The
 *			initial refcount of the page is 1.
 * @zalloc_pages_exact:	Allocate an exact number of zeroed memory pages. The
 *			@size parameter is in bytes, and is rounded-up to the
 *			next page boundary. The resulting allocation is
 *			physically contiguous.
 * @free_pages_exact:	Free an exact number of memory pages previously
 *			allocated by zalloc_pages_exact.
 * @get_page:		Increment the refcount on a page.
 * @put_page:		Decrement the refcount on a page. When the refcount
 *			reaches 0 the page is automatically freed.
 * @page_count:		Return the refcount of a page.
 * @phys_to_virt:	Convert a physical address into a virtual address mapped
 *			in the current context.
 * @virt_to_phys:	Convert a virtual address mapped in the current context
 *			into a physical address.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
};
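
/*
 * As an illustration only: a host-side implementation might wrap the kernel
 * page allocator roughly as below. The example_* names are hypothetical and
 * this is not necessarily the callback set that KVM itself installs.
 *
 *	static void *example_zalloc_page(void *arg)
 *	{
 *		return (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 *	}
 *
 *	static void *example_zalloc_pages_exact(size_t size)
 *	{
 *		return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 *	}
 *
 *	static void example_get_page(void *addr)
 *	{
 *		get_page(virt_to_page(addr));
 *	}
 *
 *	static struct kvm_pgtable_mm_ops example_mm_ops = {
 *		.zalloc_page		= example_zalloc_page,
 *		.zalloc_pages_exact	= example_zalloc_pages_exact,
 *		.free_pages_exact	= free_pages_exact,
 *		.get_page		= example_get_page,
 *	};
 *
 * The remaining callbacks would wrap put_page(), page_count() and the
 * __va()/__pa() conversions in the same fashion.
 */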

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
};

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags. Unused for stage-1 page-tables.
 */
struct kvm_pgtable {
	u32					ia_bits;
	u32					start_level;
	kvm_pte_t				*pgd;
	struct kvm_pgtable_mm_ops		*mm_ops;

	/* Stage-2 only */
	struct kvm_s2_mmu			*mmu;
	enum kvm_pgtable_stage2_flags		flags;
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),
};

#define PAGE_HYP		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

/**
 * struct kvm_mem_range - Range of Intermediate Physical Addresses.
 * @start:	Start of the range.
 * @end:	End of the range.
 */
struct kvm_mem_range {
	u64 start;
	u64 end;
};
/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
};

typedef int (*kvm_pgtable_visitor_fn_t)(u64 addr, u64 end, u32 level,
					kvm_pte_t *ptep,
					enum kvm_pgtable_walk_flags flag,
					void * const arg);

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
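
/*
 * A minimal lifecycle sketch (hyp_va_bits, va, pa, size and example_mm_ops
 * are placeholders supplied by the caller, not names defined here):
 *
 *	struct kvm_pgtable pgt;
 *	int ret;
 *
 *	ret = kvm_pgtable_hyp_init(&pgt, hyp_va_bits, &example_mm_ops);
 *	if (ret)
 *		return ret;
 *
 *	ret = kvm_pgtable_hyp_map(&pgt, va, size, pa, PAGE_HYP);
 *	if (ret)
 *		kvm_pgtable_hyp_destroy(&pgt);
 */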

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system-wide sanitised values to fill in the different fields,
 * except for Hardware Management of Access Flags. The HA flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value.
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
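
/*
 * For example (a sketch; the chosen IPA size, here ipa_limit, is up to the
 * caller):
 *
 *	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 *	u64 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 *	u64 vtcr  = kvm_get_vtcr(mmfr0, mmfr1, ipa_limit);
 */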

/**
 * kvm_pgtable_stage2_init_flags() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @arch:	Arch-specific KVM structure representing the guest virtual
 *		machine.
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
				  struct kvm_pgtable_mm_ops *mm_ops,
				  enum kvm_pgtable_stage2_flags flags);

#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \
	kvm_pgtable_stage2_init_flags(pgt, arch, mm_ops, 0)

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that this function will abort the update of a valid leaf PTE if the
 * update would either recreate the exact same mapping or change only the
 * access permissions. In that case, the vCPU will take another exit from
 * the guest if still necessary, and the permissions will then be updated
 * via the permission-relaxation path instead.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc);
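
/*
 * A typical translation-fault sketch (fault_ipa, pfn and memcache are
 * hypothetical, and a real caller must hold the appropriate MMU lock):
 *
 *	ret = kvm_pgtable_stage2_map(pgt, ALIGN_DOWN(fault_ipa, PAGE_SIZE),
 *				     PAGE_SIZE, __pfn_to_phys(pfn),
 *				     KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
 *				     memcache);
 */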

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);
/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *                                  without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
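
/*
 * For example, when enabling dirty logging for a memory region, a caller
 * might write-protect the whole range and then invalidate the TLBs itself
 * (a sketch; kvm is the caller's VM structure):
 *
 *	ret = kvm_pgtable_stage2_wrprotect(pgt, start, size);
 *	if (!ret)
 *		kvm_flush_remote_tlbs(kvm);
 */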

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * clear the access flag in that entry.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: The old page-table entry prior to clearing the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot);
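
/*
 * For instance, handling a permission fault on an already-mapped page can
 * boil down to granting the missing permission (a sketch; fault_ipa and
 * the fault decoding are the caller's business):
 *
 *	ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa,
 *					     KVM_PGTABLE_PROT_W);
 */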

/**
 * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
 *				   access flag set.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * Return: True if the page-table entry has the access flag set, false otherwise.
 */
bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
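
/*
 * Together with kvm_pgtable_stage2_mkold(), this supports MMU-notifier
 * style page aging without tearing the mapping down (a sketch; ipa is
 * hypothetical, and inspecting the access flag in the returned entry is
 * left to the caller):
 *
 *	bool young = kvm_pgtable_stage2_is_young(pgt, ipa);
 *	kvm_pte_t old = kvm_pgtable_stage2_mkold(pgt, ipa);
 */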

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. Leaf entries are reloaded
 * after invoking the walker callback, allowing the walker to descend into
 * a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);
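
/*
 * An illustrative walker that counts valid leaf entries (a sketch; it
 * assumes the architectural valid bit, bit 0, marks a valid descriptor):
 *
 *	static int count_valid_cb(u64 addr, u64 end, u32 level,
 *				  kvm_pte_t *ptep,
 *				  enum kvm_pgtable_walk_flags flag,
 *				  void * const arg)
 *	{
 *		if (*ptep & BIT(0))
 *			++*(u64 *)arg;
 *		return 0;
 *	}
 *
 *	u64 nr_valid = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_valid_cb,
 *		.arg	= &nr_valid,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *
 *	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
 */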

/**
 * kvm_pgtable_stage2_find_range() - Find a range of Intermediate Physical
 *				     Addresses with compatible permission
 *				     attributes.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Address that must be covered by the range.
 * @prot:	Protection attributes that the range must be compatible with.
 * @range:	Range structure used to limit the search space at call time and
 *		that will hold the result.
 *
 * The offset of @addr within a page is ignored. An IPA is compatible with @prot
 * iff its corresponding stage-2 page-table entry has default ownership and, if
 * valid, is mapped with protection attributes identical to @prot.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
				  enum kvm_pgtable_prot prot,
				  struct kvm_mem_range *range);
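
/*
 * A usage sketch: constrain the search space up front, then narrow it to
 * the largest compatible range around addr (the bounds are hypothetical):
 *
 *	struct kvm_mem_range range = {
 *		.start	= 0,
 *		.end	= BIT_ULL(pgt->ia_bits),
 *	};
 *
 *	ret = kvm_pgtable_stage2_find_range(pgt, addr,
 *					    KVM_PGTABLE_PROT_R, &range);
 */
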
#endif	/* __ARM64_KVM_PGTABLE_H__ */