// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include "bitops.h"

#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "cpuflags.h"

struct cpu_features cpu;
u32 cpu_vendor[3];

static bool loaded_flags;

static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	unsigned long cr0;

	/* Make sure FPU instructions will neither trap nor be emulated. */
	asm volatile("mov %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("mov %0,%%cr0" : : "r" (cr0));
	}

	/* Probe for an FPU by initializing it and reading back its state. */
	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
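
/*
 * What the probe expects (explanatory note, not from the original
 * source): fninit loads the default control word 0x037f and clears the
 * status word, so a working FPU yields fsw == 0 and
 * (fcw & 0x103f) == 0x003f. Without an FPU the fnstsw/fnstcw stores
 * never happen and both variables keep their -1 initializers, so the
 * test fails.
 */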

/*
 * For building the 16-bit code we want to explicitly specify 32-bit
 * push/pop operations, rather than just saying 'pushf' or 'popf' and
 * letting the compiler choose. But this is also included from the
 * compressed/ directory where it may be 64-bit code, and thus needs
 * to be 'pushfq' or 'popfq' in that case.
 */
#ifdef __x86_64__
#define PUSHF "pushfq"
#define POPF "popfq"
#else
#define PUSHF "pushfl"
#define POPF "popfl"
#endif

/*
 * Test whether the given EFLAGS bits can be toggled: flip them, read
 * the flags back, and report whether any of them actually changed.
 */
int has_eflag(unsigned long mask)
{
	unsigned long f0, f1;

	asm volatile(PUSHF "	\n\t"
		     PUSHF "	\n\t"
		     "pop %0	\n\t"
		     "mov %0,%1	\n\t"
		     "xor %2,%1	\n\t"
		     "push %1	\n\t"
		     POPF "	\n\t"
		     PUSHF "	\n\t"
		     "pop %1	\n\t"
		     POPF
		     : "=&r" (f0), "=&r" (f1)
		     : "ri" (mask));

	return !!((f0^f1) & mask);
}
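
/*
 * Usage note (illustrative; this mirrors the call in get_cpuflags()
 * below):
 *
 *	if (has_eflag(X86_EFLAGS_ID))
 *		... the CPUID instruction is available ...
 *
 * X86_EFLAGS_ID is bit 21 of EFLAGS; it can only be toggled by
 * software on CPUs that implement CPUID, which is exactly what the
 * push/xor/pop sequence above detects.
 */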

/*
 * Handle x86_32 PIC using ebx: on 32-bit PIC builds %ebx holds the GOT
 * pointer and must not be clobbered, so let the compiler pick a scratch
 * register instead and shuffle the CPUID output through it.
 */
#if defined(__i386__) && defined(__PIC__)
# define EBX_REG "=r"
#else
# define EBX_REG "=b"
#endif

static inline void cpuid_count(u32 id, u32 count,
		u32 *a, u32 *b, u32 *c, u32 *d)
{
	/*
	 * If %3 is not %ebx (the PIC case), copy %ebx into the scratch
	 * output before CPUID clobbers it, then exchange the two so
	 * %ebx is restored and the scratch register holds the EBX
	 * result. The .ifnc blocks assemble to nothing when %3 is
	 * %ebx itself.
	 */
	asm volatile(".ifnc %%ebx,%3 ; movl  %%ebx,%3 ; .endif	\n\t"
		     "cpuid					\n\t"
		     ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif	\n\t"
		    : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
		    : "a" (id), "c" (count)
	);
}
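
/*
 * For illustration (an assumed expansion; the compiler is free to pick
 * any register, %esi shown here for a 32-bit PIC build):
 *
 *	movl  %ebx,%esi		# save the PIC register
 *	cpuid			# clobbers %ebx
 *	xchgl %ebx,%esi		# restore %ebx; %esi holds the EBX output
 */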

#define cpuid(id, a, b, c, d) cpuid_count(id, 0, a, b, c, d)
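
/*
 * Example (illustrative; this is how get_cpuflags() below reads the
 * vendor string): leaf 0 returns the maximum standard leaf in EAX and
 * the vendor ID in EBX, EDX, ECX, in that order, so
 *
 *	cpuid(0x0, &max_level, &cpu_vendor[0], &cpu_vendor[2],
 *	      &cpu_vendor[1]);
 *
 * leaves cpu_vendor[] holding e.g. "Genu" "ineI" "ntel".
 */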

void get_cpuflags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;
	u32 ignored;

	if (loaded_flags)
		return;
	loaded_flags = true;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
		      &cpu_vendor[1]);

		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
			      &cpu.flags[0]);
			/* EAX: family in bits 8-11, model in bits 4-7. */
			cpu.level = (tfms >> 8) & 15;
			cpu.family = cpu.level;
			cpu.model = (tfms >> 4) & 15;
			/* Family 6+ adds an extended model in bits 16-19. */
			if (cpu.level >= 6)
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

		if (max_intel_level >= 0x00000007) {
			cpuid_count(0x00000007, 0, &ignored, &ignored,
					&cpu.flags[16], &ignored);
		}

		cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
		      &ignored);

		if (max_amd_level >= 0x80000001 &&
		    max_amd_level <= 0x8000ffff) {
			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
			      &cpu.flags[1]);
		}
	}
}
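
/*
 * Note on the cpu.flags indices used above (explanatory, matching the
 * kernel's cpufeatures word numbering): word 0 is CPUID(1) EDX, word 4
 * is CPUID(1) ECX, word 16 is CPUID(7,0) ECX, and words 1 and 6 are
 * CPUID(0x80000001) EDX and ECX respectively.
 */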