xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/hyp/nvhe/mm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2020 Google LLC
4*4882a593Smuzhiyun  * Author: Quentin Perret <qperret@google.com>
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/kvm_host.h>
8*4882a593Smuzhiyun #include <asm/kvm_hyp.h>
9*4882a593Smuzhiyun #include <asm/kvm_mmu.h>
10*4882a593Smuzhiyun #include <asm/kvm_pgtable.h>
11*4882a593Smuzhiyun #include <asm/spectre.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <nvhe/early_alloc.h>
14*4882a593Smuzhiyun #include <nvhe/gfp.h>
15*4882a593Smuzhiyun #include <nvhe/memory.h>
16*4882a593Smuzhiyun #include <nvhe/mm.h>
17*4882a593Smuzhiyun #include <nvhe/spinlock.h>
18*4882a593Smuzhiyun 
/* Hypervisor stage-1 page-table shared by all CPUs; walks are serialized by pkvm_pgd_lock. */
struct kvm_pgtable pkvm_pgtable;
/* Protects pkvm_pgtable modifications and the __io_map_base allocator below. */
hyp_spinlock_t pkvm_pgd_lock;
/* Next free hyp VA in the private (I/O) mapping range; initialized in hyp_create_idmap(). */
u64 __io_map_base;

/* NOTE(review): presumably a copy of the host memblock map handed over at init — confirm against setup code. */
struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;
25*4882a593Smuzhiyun 
__pkvm_create_mappings(unsigned long start,unsigned long size,unsigned long phys,enum kvm_pgtable_prot prot)26*4882a593Smuzhiyun int __pkvm_create_mappings(unsigned long start, unsigned long size,
27*4882a593Smuzhiyun 			  unsigned long phys, enum kvm_pgtable_prot prot)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun 	int err;
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun 	hyp_spin_lock(&pkvm_pgd_lock);
32*4882a593Smuzhiyun 	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
33*4882a593Smuzhiyun 	hyp_spin_unlock(&pkvm_pgd_lock);
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun 	return err;
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun 
/*
 * Allocate a span of private hyp VA space and map @size bytes at @phys into
 * it with permissions @prot. VAs are handed out by bumping __io_map_base, so
 * they are never recycled.
 *
 * Returns the hyp VA of the mapping (adjusted for the sub-page offset of
 * @phys), or an ERR_PTR() value cast to unsigned long on failure -- callers
 * must check with IS_ERR()/IS_ERR_VALUE(), not against NULL.
 */
unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
					    enum kvm_pgtable_prot prot)
{
	unsigned long addr;
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);

	/* Cover the sub-page offset of @phys, then reserve whole pages. */
	size = PAGE_ALIGN(size + offset_in_page(phys));
	addr = __io_map_base;
	__io_map_base += size;

	/* Are we overflowing on the vmemmap ? */
	if (__io_map_base > __hyp_vmemmap) {
		/* Roll the reservation back before bailing out. */
		__io_map_base -= size;
		addr = (unsigned long)ERR_PTR(-ENOMEM);
		goto out;
	}

	/*
	 * NOTE(review): on map failure the reserved VA range is not returned
	 * to __io_map_base (unlike the overflow path above). Harmless since
	 * the allocator never frees, but worth confirming this is intended.
	 */
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
	if (err) {
		addr = (unsigned long)ERR_PTR(err);
		goto out;
	}

	/* Point the returned VA at @phys itself, not the page base. */
	addr = addr + offset_in_page(phys);
out:
	hyp_spin_unlock(&pkvm_pgd_lock);

	return addr;
}
69*4882a593Smuzhiyun 
pkvm_create_mappings(void * from,void * to,enum kvm_pgtable_prot prot)70*4882a593Smuzhiyun int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun 	unsigned long start = (unsigned long)from;
73*4882a593Smuzhiyun 	unsigned long end = (unsigned long)to;
74*4882a593Smuzhiyun 	unsigned long virt_addr;
75*4882a593Smuzhiyun 	phys_addr_t phys;
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	start = start & PAGE_MASK;
78*4882a593Smuzhiyun 	end = PAGE_ALIGN(end);
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
81*4882a593Smuzhiyun 		int err;
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 		phys = hyp_virt_to_phys((void *)virt_addr);
84*4882a593Smuzhiyun 		err = __pkvm_create_mappings(virt_addr, PAGE_SIZE, phys, prot);
85*4882a593Smuzhiyun 		if (err)
86*4882a593Smuzhiyun 			return err;
87*4882a593Smuzhiyun 	}
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun 	return 0;
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun 
/*
 * Back the vmemmap slice covering the memory region [phys, phys + size) with
 * the pages starting at physical address @back.
 */
int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
{
	unsigned long vmemmap_start, vmemmap_end;

	hyp_vmemmap_range(phys, size, &vmemmap_start, &vmemmap_end);

	return __pkvm_create_mappings(vmemmap_start,
				      vmemmap_end - vmemmap_start,
				      back, PAGE_HYP);
}
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun static void *__hyp_bp_vect_base;
pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)102*4882a593Smuzhiyun int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun 	void *vector;
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	switch (slot) {
107*4882a593Smuzhiyun 	case HYP_VECTOR_DIRECT: {
108*4882a593Smuzhiyun 		vector = __kvm_hyp_vector;
109*4882a593Smuzhiyun 		break;
110*4882a593Smuzhiyun 	}
111*4882a593Smuzhiyun 	case HYP_VECTOR_SPECTRE_DIRECT: {
112*4882a593Smuzhiyun 		vector = __bp_harden_hyp_vecs;
113*4882a593Smuzhiyun 		break;
114*4882a593Smuzhiyun 	}
115*4882a593Smuzhiyun 	case HYP_VECTOR_INDIRECT:
116*4882a593Smuzhiyun 	case HYP_VECTOR_SPECTRE_INDIRECT: {
117*4882a593Smuzhiyun 		vector = (void *)__hyp_bp_vect_base;
118*4882a593Smuzhiyun 		break;
119*4882a593Smuzhiyun 	}
120*4882a593Smuzhiyun 	default:
121*4882a593Smuzhiyun 		return -EINVAL;
122*4882a593Smuzhiyun 	}
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 	vector = __kvm_vector_slot2addr(vector, slot);
125*4882a593Smuzhiyun 	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	return 0;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun 
hyp_map_vectors(void)130*4882a593Smuzhiyun int hyp_map_vectors(void)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun 	phys_addr_t phys;
133*4882a593Smuzhiyun 	void *bp_base;
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 	if (!kvm_system_needs_idmapped_vectors()) {
136*4882a593Smuzhiyun 		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
137*4882a593Smuzhiyun 		return 0;
138*4882a593Smuzhiyun 	}
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	phys = __hyp_pa(__bp_harden_hyp_vecs);
141*4882a593Smuzhiyun 	bp_base = (void *)__pkvm_create_private_mapping(phys,
142*4882a593Smuzhiyun 							__BP_HARDEN_HYP_VECS_SZ,
143*4882a593Smuzhiyun 							PAGE_HYP_EXEC);
144*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(bp_base))
145*4882a593Smuzhiyun 		return PTR_ERR(bp_base);
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	__hyp_bp_vect_base = bp_base;
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 	return 0;
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun 
/*
 * Identity-map the hyp idmap text section and carve up the hyp VA space:
 * pick the quarter of VA space that does not contain the idmap, then place
 * the private I/O mapping range and the vmemmap inside it.
 *
 * Returns 0 on success or a negative error from __pkvm_create_mappings().
 */
int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	/* Page-align the physical span of the idmap text section. */
	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
	/* Flip bit (hyp_va_bits - 2) of the idmap base to land in the other quarter. */
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	/* The idmap must be executable: it contains the EL2 trampoline code. */
	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}
176