// SPDX-License-Identifier: GPL-2.0-only
/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 */

#include <linux/ratelimit.h>

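/*
 * Human-readable names for the audit points, indexed by
 * kvm->arch.audit_point (the AUDIT_* values passed to kvm_mmu_audit());
 * the order here must match those constants.
 */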
static char const *audit_point_name[] = {
	"pre page fault",
	"post page fault",
	"pre pte write",
	"post pte write",
	"pre sync",
	"post sync"
};

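/*
 * All audit failures go through this macro so each message is tagged
 * with the audit point at which the inconsistency was seen.
 */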
#define audit_printk(kvm, fmt, args...)		\
	printk(KERN_ERR "audit: (%s) error: "	\
		fmt, audit_point_name[kvm->arch.audit_point], ##args)

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);

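/*
 * Recursively apply @fn to every SPTE in the subtree rooted at @sp,
 * including non-present entries, descending into present non-leaf
 * entries with @level reduced by one.
 */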
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn, int level)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 *ent = sp->spt;

		fn(vcpu, ent + i, level);

		if (is_shadow_present_pte(ent[i]) &&
		      !is_last_spte(ent[i], level)) {
			struct kvm_mmu_page *child;

			child = to_shadow_page(ent[i] & PT64_BASE_ADDR_MASK);
			__mmu_spte_walk(vcpu, child, fn, level - 1);
		}
	}
}

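/*
 * Walk all SPTEs reachable from the current MMU root.  A 4- or 5-level
 * root is a single shadow page; otherwise the four PAE roots are walked
 * one by one, each as a level-2 page directory.
 */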
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		return;

	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		hpa_t root = vcpu->arch.mmu->root_hpa;

		sp = to_shadow_page(root);
		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level);
		return;
	}

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu->pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = to_shadow_page(root);
			__mmu_spte_walk(vcpu, sp, fn, 2);
		}
	}
}

typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

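/* Apply @fn to every shadow page on the VM's active list. */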
static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
		fn(kvm, sp);
}

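/*
 * Check that a present leaf SPTE points at the host pfn that currently
 * backs its gfn, by re-resolving the gfn through the memslots and
 * comparing against the address bits of the SPTE.  An unsync shadow
 * page is only legal at the 4K level.
 */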
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	kvm_pfn_t pfn;
	hpa_t hpa;

	sp = sptep_to_sp(sptep);

	if (sp->unsync) {
		if (level != PG_LEVEL_4K) {
			audit_printk(vcpu->kvm, "unsync sp: %p "
				     "level = %d\n", sp, level);
			return;
		}
	}

	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
		return;

	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
	pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);

	if (is_error_pfn(pfn))
		return;

	hpa = pfn << PAGE_SHIFT;
	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
			     "ent %llx\n", vcpu->arch.mmu->root_level, pfn,
			     hpa, *sptep);
}

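/*
 * Verify that the SPTE is reachable through the rmap of the gfn it maps;
 * a missing memslot or an empty rmap head points at a stale or leaked
 * mapping.  Reports are rate-limited to keep the log usable.
 */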
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
	struct kvm_rmap_head *rmap_head;
	struct kvm_mmu_page *rev_sp;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	rev_sp = sptep_to_sp(sptep);
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (!slot) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
			     (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
	if (!rmap_head->val) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}

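/* Per-SPTE hook: every present leaf SPTE must have an rmap entry. */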
static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
		inspect_spte_has_rmap(vcpu->kvm, sptep);
}

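/*
 * After a sync (AUDIT_POST_SYNC), no shadow page reachable from the
 * synced root should still be marked unsync.
 */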
static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);

	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
			     "root.\n", sp);
}

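/*
 * For a last-level (4K) shadow page, every present SPTE must be found in
 * its gfn's rmap.
 */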
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int i;

	if (sp->role.level != PG_LEVEL_4K)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		inspect_spte_has_rmap(kvm, sp->spt + i);
	}
}

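/*
 * A shadow page that shadows a guest page table (not direct), is synced
 * and still valid must have its gfn write-protected: a writable 4K
 * mapping of that gfn would let the guest modify its page table without
 * KVM noticing.
 */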
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_rmap_head *rmap_head;
	u64 *sptep;
	struct rmap_iterator iter;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, sp->gfn);
	rmap_head = __gfn_to_rmap(sp->gfn, PG_LEVEL_4K, slot);

	for_each_rmap_spte(rmap_head, &iter, sptep) {
		if (is_writable_pte(*sptep))
			audit_printk(kvm, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
	}
}

static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	check_mappings_rmap(kvm, sp);
	audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
	walk_all_active_sps(kvm, audit_sp);
}

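/* Bundle of all per-SPTE audits, used as the mmu_spte_walk() callback. */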
static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	audit_sptes_have_rmaps(vcpu, sptep, level);
	audit_mappings(vcpu, sptep, level);
	audit_spte_after_sync(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, audit_spte);
}

static bool mmu_audit;
static struct static_key mmu_audit_key;

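/*
 * Run a full audit: first every active shadow page, then every SPTE
 * reachable from this vcpu's root.  Rate-limited, since a full walk is
 * expensive and the audit points sit on hot paths.
 */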
static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!__ratelimit(&ratelimit_state))
		return;

	vcpu->kvm->arch.audit_point = point;
	audit_all_active_sps(vcpu->kvm);
	audit_vcpu_spte(vcpu);
}

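/*
 * Entry point for the rest of the MMU code.  The static key reduces the
 * disabled case to a patched no-op branch.  When CONFIG_KVM_MMU_AUDIT is
 * enabled, callers in mmu.c are expected to bracket audited operations
 * with the matching audit points, e.g. (illustrative sketch, not the
 * verbatim call sites):
 *
 *	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 *	...handle the fault...
 *	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 */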
static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	if (static_key_false(&mmu_audit_key))
		__kvm_mmu_audit(vcpu, point);
}

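/*
 * Flip the static key that gates kvm_mmu_audit().  The mmu_audit bool
 * mirrors the key so that repeated writes of the same value don't
 * unbalance the enable/disable counts.
 */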
static void mmu_audit_enable(void)
{
	if (mmu_audit)
		return;

	static_key_slow_inc(&mmu_audit_key);
	mmu_audit = true;
}

static void mmu_audit_disable(void)
{
	if (!mmu_audit)
		return;

	static_key_slow_dec(&mmu_audit_key);
	mmu_audit = false;
}

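/* Module-parameter setter: accepts only "0" (disable) or "1" (enable). */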
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	unsigned long enable;

	ret = kstrtoul(val, 10, &enable);
	if (ret < 0)
		return -EINVAL;

	switch (enable) {
	case 0:
		mmu_audit_disable();
		break;
	case 1:
		mmu_audit_enable();
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct kernel_param_ops audit_param_ops = {
	.set = mmu_audit_set,
	.get = param_get_bool,
};

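/*
 * Expose "mmu_audit" as a writable (0644) parameter so auditing can be
 * toggled at runtime.  arch_param_cb() rather than module_param_cb() is
 * presumably used so that, on built-in kernels, the parameter is applied
 * at the arch initcall level.
 */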
arch_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);