xref: /OK3568_Linux_fs/kernel/arch/x86/boot/cpucheck.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /* -*- linux-c -*- ------------------------------------------------------- *
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  *   Copyright (C) 1991, 1992 Linus Torvalds
5*4882a593Smuzhiyun  *   Copyright 2007 rPath, Inc. - All Rights Reserved
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * ----------------------------------------------------------------------- */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun /*
10*4882a593Smuzhiyun  * Check for obligatory CPU features and abort if the features are not
11*4882a593Smuzhiyun  * present.  This code should be compilable as 16-, 32- or 64-bit
12*4882a593Smuzhiyun  * code, so be very careful with types and inline assembly.
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * This code should not contain any messages; that requires an
15*4882a593Smuzhiyun  * additional wrapper.
16*4882a593Smuzhiyun  *
17*4882a593Smuzhiyun  * As written, this code is not safe for inclusion into the kernel
18*4882a593Smuzhiyun  * proper (after FPU initialization, in particular).
19*4882a593Smuzhiyun  */
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #ifdef _SETUP
22*4882a593Smuzhiyun # include "boot.h"
23*4882a593Smuzhiyun #endif
24*4882a593Smuzhiyun #include <linux/types.h>
25*4882a593Smuzhiyun #include <asm/intel-family.h>
26*4882a593Smuzhiyun #include <asm/processor-flags.h>
27*4882a593Smuzhiyun #include <asm/required-features.h>
28*4882a593Smuzhiyun #include <asm/msr-index.h>
29*4882a593Smuzhiyun #include "string.h"
30*4882a593Smuzhiyun 
/* Per-word bitmask of required feature bits found missing on this CPU;
   filled in by check_cpuflags() and exported to the caller of check_cpu() */
static u32 err_flags[NCAPINTS];

/* Minimum CPU family this kernel was configured for (Kconfig) */
static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;

/* Required CPUID feature bits, one u32 per cpufeature word.  Only the
   words this boot-time checker knows how to gather are populated; the
   rest are deliberately zero. */
static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	0, /* REQUIRED_MASK2 not implemented in this file */
	0, /* REQUIRED_MASK3 not implemented in this file */
	REQUIRED_MASK4,
	0, /* REQUIRED_MASK5 not implemented in this file */
	REQUIRED_MASK6,
	0, /* REQUIRED_MASK7 not implemented in this file */
	0, /* REQUIRED_MASK8 not implemented in this file */
	0, /* REQUIRED_MASK9 not implemented in this file */
	0, /* REQUIRED_MASK10 not implemented in this file */
	0, /* REQUIRED_MASK11 not implemented in this file */
	0, /* REQUIRED_MASK12 not implemented in this file */
	0, /* REQUIRED_MASK13 not implemented in this file */
	0, /* REQUIRED_MASK14 not implemented in this file */
	0, /* REQUIRED_MASK15 not implemented in this file */
	REQUIRED_MASK16,
};

/* Pack four characters into a little-endian u32, matching the in-memory
   layout of the CPUID vendor string words in cpu_vendor[] */
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
57*4882a593Smuzhiyun 
is_amd(void)58*4882a593Smuzhiyun static int is_amd(void)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun 	return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
61*4882a593Smuzhiyun 	       cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
62*4882a593Smuzhiyun 	       cpu_vendor[2] == A32('c', 'A', 'M', 'D');
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun 
is_centaur(void)65*4882a593Smuzhiyun static int is_centaur(void)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun 	return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
68*4882a593Smuzhiyun 	       cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
69*4882a593Smuzhiyun 	       cpu_vendor[2] == A32('a', 'u', 'l', 's');
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun 
is_transmeta(void)72*4882a593Smuzhiyun static int is_transmeta(void)
73*4882a593Smuzhiyun {
74*4882a593Smuzhiyun 	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
75*4882a593Smuzhiyun 	       cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
76*4882a593Smuzhiyun 	       cpu_vendor[2] == A32('M', 'x', '8', '6');
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun 
is_intel(void)79*4882a593Smuzhiyun static int is_intel(void)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun 	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
82*4882a593Smuzhiyun 	       cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
83*4882a593Smuzhiyun 	       cpu_vendor[2] == A32('n', 't', 'e', 'l');
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun /* Returns a bitmask of which words we have error bits in */
check_cpuflags(void)87*4882a593Smuzhiyun static int check_cpuflags(void)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun 	u32 err;
90*4882a593Smuzhiyun 	int i;
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	err = 0;
93*4882a593Smuzhiyun 	for (i = 0; i < NCAPINTS; i++) {
94*4882a593Smuzhiyun 		err_flags[i] = req_flags[i] & ~cpu.flags[i];
95*4882a593Smuzhiyun 		if (err_flags[i])
96*4882a593Smuzhiyun 			err |= 1 << i;
97*4882a593Smuzhiyun 	}
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	return err;
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun 
102*4882a593Smuzhiyun /*
103*4882a593Smuzhiyun  * Returns -1 on error.
104*4882a593Smuzhiyun  *
105*4882a593Smuzhiyun  * *cpu_level is set to the current CPU level; *req_level to the required
106*4882a593Smuzhiyun  * level.  x86-64 is considered level 64 for this purpose.
107*4882a593Smuzhiyun  *
108*4882a593Smuzhiyun  * *err_flags_ptr is set to the flags error array if there are flags missing.
109*4882a593Smuzhiyun  */
/*
 * Validate the running CPU against the kernel's required features.
 *
 * Returns -1 on error (CPU level too low, or required feature flags
 * missing even after applying vendor-specific workarounds), 0 otherwise.
 *
 * *cpu_level_ptr is set to the current CPU level; *req_level_ptr to the
 * required level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags
 * missing, NULL otherwise.  All three output pointers may be NULL.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof(cpu.flags));
	cpu.level = 3;

	/* The EFLAGS.AC bit can only be toggled on a 486 or later */
	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_cpuflags();
	err = check_cpuflags();

	/* Long mode support means this is a 64-bit capable CPU */
	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

	/* err == 0x01 below means: only feature word 0 has missing bits */
	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */

		u32 ecx = MSR_K7_HWCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		eax &= ~(1 << 15);	/* clear bit 15 of HWCR to un-disable SSE */
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		get_cpuflags();	/* Make sure it really did something */
		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */

		u32 ecx = MSR_VIA_FCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		eax |= (1<<1)|(1<<7);	/* set the CX8/CMPXCHG8B enable bits in FCR */
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_cpuflags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */

		u32 ecx = 0x80860004;	/* Transmeta feature-mask MSR */
		u32 eax, edx;
		u32 level = 1;

		/* Save the mask, unmask everything, re-read CPUID leaf 1,
		   then restore the original mask */
		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
		   is_intel() && cpu.level == 6 &&
		   (cpu.model == 9 || cpu.model == 13)) {
		/* PAE is disabled on this Pentium M but can be forced */
		if (cmdline_find_option_bool("forcepae")) {
			puts("WARNING: Forcing PAE in CPU flags\n");
			set_bit(X86_FEATURE_PAE, cpu.flags);
			err = check_cpuflags();
		}
		else {
			puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
		}
	}
	/* Even a feature-complete CPU may be rejected by the KNL erratum check */
	if (!err)
		err = check_knl_erratum();

	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}
198*4882a593Smuzhiyun 
check_knl_erratum(void)199*4882a593Smuzhiyun int check_knl_erratum(void)
200*4882a593Smuzhiyun {
201*4882a593Smuzhiyun 	/*
202*4882a593Smuzhiyun 	 * First check for the affected model/family:
203*4882a593Smuzhiyun 	 */
204*4882a593Smuzhiyun 	if (!is_intel() ||
205*4882a593Smuzhiyun 	    cpu.family != 6 ||
206*4882a593Smuzhiyun 	    cpu.model != INTEL_FAM6_XEON_PHI_KNL)
207*4882a593Smuzhiyun 		return 0;
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	/*
210*4882a593Smuzhiyun 	 * This erratum affects the Accessed/Dirty bits, and can
211*4882a593Smuzhiyun 	 * cause stray bits to be set in !Present PTEs.  We have
212*4882a593Smuzhiyun 	 * enough bits in our 64-bit PTEs (which we have on real
213*4882a593Smuzhiyun 	 * 64-bit mode or PAE) to avoid using these troublesome
214*4882a593Smuzhiyun 	 * bits.  But, we do not have enough space in our 32-bit
215*4882a593Smuzhiyun 	 * PTEs.  So, refuse to run on 32-bit non-PAE kernels.
216*4882a593Smuzhiyun 	 */
217*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_X86_64) || IS_ENABLED(CONFIG_X86_PAE))
218*4882a593Smuzhiyun 		return 0;
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 	puts("This 32-bit kernel can not run on this Xeon Phi x200\n"
221*4882a593Smuzhiyun 	     "processor due to a processor erratum.  Use a 64-bit\n"
222*4882a593Smuzhiyun 	     "kernel, or enable PAE in this 32-bit kernel.\n\n");
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	return -1;
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 
228