// arch/x86/kvm/mtrr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}
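
/*
 * For reference, the 0x200-based case above follows the SDM layout of
 * the variable-range MTRR pairs, which alternate base and mask MSRs:
 *
 *   0x200 = MTRRphysBase0, 0x201 = MTRRphysMask0,
 *   0x202 = MTRRphysBase1, 0x203 = MTRRphysMask1, ...
 *
 * so with KVM_NR_VAR_MTRR == 8 the case label ends at 0x20f,
 * i.e. MTRRphysMask7.
 */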

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}
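
/*
 * The 0x73 bitmask decodes as 0111 0011b, i.e. bits 0 (UC), 1 (WC),
 * 4 (WT), 5 (WP) and 6 (WB) -- the architectural memory type
 * encodings.  Types 2, 3 and 7 are reserved, so for example
 * valid_mtrr_type(2) == false while valid_mtrr_type(6) == true.
 */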

bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		return kvm_pat_valid(data);
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;
	if (data & mask) {
		kvm_inject_gp(vcpu, 0);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
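
/*
 * A worked example of the reserved-bit check above, with an
 * illustrative cpuid_maxphyaddr(vcpu) == 36:
 *
 *   mask = ~0ULL << 36          -> bits 63:36 reserved
 *   base MSR: mask |= 0xf00     -> bits 11:8 also reserved
 *                                  (bits 7:0 hold the memory type)
 *   mask MSR: mask |= 0x7ff     -> bits 10:0 also reserved
 *                                  (bit 11 is the V flag)
 *
 * Any write that sets a bit inside "mask" injects #GP into the guest.
 */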

static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when the
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
	 * memory type is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs and it is obviously undesirable to run the
	 * guest entirely with UC memory, so we use WB instead.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
		return MTRR_TYPE_UNCACHABLE;
	else
		return MTRR_TYPE_WRBACK;
}

/*
 * Three terms are used in the following code:
 * - segment: an address segment covered by the fixed MTRRs.
 * - unit: one MSR entry within a segment.
 * - range: a span of memory covered by a single cache type; each unit
 *   holds eight of them, one per type byte.
 */
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K */
		.range_start = 24,
	}
};
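
/*
 * A quick sanity check of the numbers in the table above:
 *
 *   seg 0: [0x00000, 0x80000),  1 unit  * 8 * 64K = 512K, ranges  0..7
 *   seg 1: [0x80000, 0xc0000),  2 units * 8 * 16K = 256K, ranges  8..23
 *   seg 2: [0xc0000, 0x100000), 8 units * 8 *  4K = 256K, ranges 24..87
 *
 * i.e. the 11 fixed MTRR MSRs (1 + 2 + 8) provide 88 type bytes
 * covering the first 1MB of guest physical memory.
 */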

/*
 * One unit is covered by one MSR; each MSR entry contains 8 ranges, so
 * the unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}

static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix16K_80000,
			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix4K_C0000,
			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
		break;
	default:
		return false;
	}

	return true;
}

static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}

static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}
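
/*
 * Example: MSR_MTRRfix4K_D8000 maps to seg = 2, unit = 3, so its first
 * range index is 24 + 8 * 3 = 48; its eight type bytes land in
 * fixed_ranges[48..55] and cover [0xd8000, 0xe0000).
 */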

static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}

static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}

static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}

static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}

static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}

static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}

static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/*
	 * This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
}
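
/*
 * Worked example (illustrative values, maxphyaddr == 36): for a 1GB
 * range at 4GB the guest programs
 *
 *   base = 0x100000000 | MTRR_TYPE_WRBACK
 *   mask = 0xfc0000000 | (1 << 11)               (V flag set)
 *
 * After the stored mask is extended to bit 63 (see set_var_mtrr_msr()
 * below), mask & PAGE_MASK == 0xffffffffc0000000, giving
 *
 *   *start = 0x100000000
 *   *end   = (0x100000000 | 0x3fffffff) + 1 = 0x140000000
 */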

static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	gfn_t start, end;
	int index;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}

static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
{
	return (range->mask & (1 << 11)) != 0;
}

static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;
	int index, is_mtrr_mask;

	index = (msr - 0x200) / 2;
	is_mtrr_mask = msr - 0x200 - 2 * index;
	cur = &mtrr_state->var_ranges[index];

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&mtrr_state->var_ranges[index].node);

	/*
	 * Extend the mask with all 1 bits to the left, since those
	 * bits must implicitly be 0.  The bits are then cleared
	 * when reading them.
	 */
	if (!is_mtrr_mask)
		cur->base = data;
	else
		cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
				break;
		list_add_tail(&cur->node, &tmp->node);
	}
}
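
/*
 * Example of the mask extension above, again with maxphyaddr == 36: a
 * guest write of 0xfc0000800 to an MTRRphysMask MSR is stored as
 *
 *   0xfc0000800 | (-1LL << 36) == 0xffffffffc0000800
 *
 * so var_mtrr_range() can compute the range end with a plain
 * "(*start | ~mask) + 1" and no overflow.  kvm_mtrr_get_msr() strips
 * the extra bits again before returning the value to the guest.
 */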

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
	return 0;
}

int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
		else
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;

		*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
	}

	return 0;
}
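
/*
 * Decoding the MTRRcap value above: bit 10 is WC, bit 8 is FIX and
 * bits 7:0 are VCNT, so with KVM_NR_VAR_MTRR == 8 the guest reads
 * 0x500 | 8 == 0x508: 8 variable ranges, fixed ranges and WC
 * supported, SMRR not supported.
 */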

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}

struct mtrr_iter {
	/* input fields. */
	struct kvm_mtrr *mtrr_state;
	u64 start;
	u64 end;

	/* output fields. */
	int mem_type;
	/* mtrr is completely disabled? */
	bool mtrr_disabled;
	/* [start, end) is not fully covered in MTRRs? */
	bool partial_map;

	/* private fields. */
	union {
		/* used for fixed MTRRs. */
		struct {
			int index;
			int seg;
		};

		/* used for var MTRRs. */
		struct {
			struct kvm_mtrr_range *range;
			/* the max address that has been covered by var MTRRs. */
			u64 start_max;
		};
	};

	bool fixed;
};

static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
	int seg, index;

	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
		return false;

	seg = fixed_mtrr_addr_to_seg(iter->start);
	if (seg < 0)
		return false;

	iter->fixed = true;
	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
	iter->index = index;
	iter->seg = seg;
	return true;
}

static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
{
	u64 start, end;

	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		iter->range = range;

		/*
		 * This function is called while walking the
		 * kvm_mtrr.head list; this range is the one with the
		 * minimum base address that overlaps
		 * [iter->start_max, iter->end).
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the max address that has been covered. */
		iter->start_max = max(iter->start_max, end);
		return true;
	}

	return false;
}

static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))
			return;

	iter->range = NULL;
	iter->partial_map |= iter->start_max < iter->end;
}

static void mtrr_lookup_var_start(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->fixed = false;
	iter->start_max = iter->start;
	iter->range = NULL;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
{
	/* terminate the lookup. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
		iter->fixed = false;
		iter->range = NULL;
		return;
	}

	iter->index++;

	/* all fixed MTRRs have been looked up. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
		iter->seg++;
}

static void mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_start(struct mtrr_iter *iter)
{
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;
		return;
	}

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);
}

static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	iter->mtrr_state = mtrr_state;
	iter->start = start;
	iter->end = end;
	iter->mtrr_disabled = false;
	iter->partial_map = false;
	iter->fixed = false;
	iter->range = NULL;

	mtrr_lookup_start(iter);
}

static bool mtrr_lookup_okay(struct mtrr_iter *iter)
{
	if (iter->fixed) {
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
		return true;
	}

	if (iter->range) {
		iter->mem_type = iter->range->base & 0xff;
		return true;
	}

	return false;
}

static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	if (iter->fixed)
		mtrr_lookup_fixed_next(iter);
	else
		mtrr_lookup_var_next(iter);
}

#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
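
/*
 * Typical use of the iterator, as in the two functions below: walk
 * every memory type that overlaps a GPA range, then inspect the
 * iterator's output fields.
 *
 *	struct mtrr_iter iter;
 *
 *	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
 *		... use iter.mem_type ...
 *	}
 *	... check iter.mtrr_disabled and iter.partial_map ...
 */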

u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		      ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not contained in any MTRRs. */
	if (type == -1)
		return mtrr_default_type(mtrr_state);

	/*
	 * We only checked one page, so it is impossible for it to be
	 * partially covered by MTRRs.
	 */
	WARN_ON(iter.partial_map);

	return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
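
/*
 * Precedence examples for the rules above (SDM 11.11.4.1): a page hit
 * by both a WB and a WT variable range resolves to WT; WB plus UC
 * resolves to UC; any other overlap, e.g. WC plus WT, is
 * architecturally undefined and KVM falls back to WB.
 */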

bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		if (type == -1) {
			type = iter.mem_type;
			continue;
		}

		if (type != iter.mem_type)
			return false;
	}

	if (iter.mtrr_disabled)
		return true;

	if (!iter.partial_map)
		return true;

	if (type == -1)
		return true;

	return type == mtrr_default_type(mtrr_state);
}