xref: /OK3568_Linux_fs/kernel/arch/x86/kernel/cpu/hygon.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Hygon Processor Support for Linux
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Author: Pu Wen <puwen@hygon.cn>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun #include <linux/io.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <asm/cpu.h>
12*4882a593Smuzhiyun #include <asm/smp.h>
13*4882a593Smuzhiyun #include <asm/numa.h>
14*4882a593Smuzhiyun #include <asm/cacheinfo.h>
15*4882a593Smuzhiyun #include <asm/spec-ctrl.h>
16*4882a593Smuzhiyun #include <asm/delay.h>
17*4882a593Smuzhiyun #ifdef CONFIG_X86_64
18*4882a593Smuzhiyun # include <asm/set_memory.h>
19*4882a593Smuzhiyun #endif
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #include "cpu.h"
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #define APICID_SOCKET_ID_BIT 6
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun /*
26*4882a593Smuzhiyun  * nodes_per_socket: Stores the number of nodes per socket.
27*4882a593Smuzhiyun  * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
28*4882a593Smuzhiyun  */
29*4882a593Smuzhiyun static u32 nodes_per_socket = 1;
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #ifdef CONFIG_NUMA
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun  * To workaround broken NUMA config.  Read the comment in
34*4882a593Smuzhiyun  * srat_detect_node().
35*4882a593Smuzhiyun  */
nearby_node(int apicid)36*4882a593Smuzhiyun static int nearby_node(int apicid)
37*4882a593Smuzhiyun {
38*4882a593Smuzhiyun 	int i, node;
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 	for (i = apicid - 1; i >= 0; i--) {
41*4882a593Smuzhiyun 		node = __apicid_to_node[i];
42*4882a593Smuzhiyun 		if (node != NUMA_NO_NODE && node_online(node))
43*4882a593Smuzhiyun 			return node;
44*4882a593Smuzhiyun 	}
45*4882a593Smuzhiyun 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
46*4882a593Smuzhiyun 		node = __apicid_to_node[i];
47*4882a593Smuzhiyun 		if (node != NUMA_NO_NODE && node_online(node))
48*4882a593Smuzhiyun 			return node;
49*4882a593Smuzhiyun 	}
50*4882a593Smuzhiyun 	return first_node(node_online_map); /* Shouldn't happen */
51*4882a593Smuzhiyun }
52*4882a593Smuzhiyun #endif
53*4882a593Smuzhiyun 
hygon_get_topology_early(struct cpuinfo_x86 * c)54*4882a593Smuzhiyun static void hygon_get_topology_early(struct cpuinfo_x86 *c)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun 	if (cpu_has(c, X86_FEATURE_TOPOEXT))
57*4882a593Smuzhiyun 		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun /*
61*4882a593Smuzhiyun  * Fixup core topology information for
62*4882a593Smuzhiyun  * (1) Hygon multi-node processors
63*4882a593Smuzhiyun  *     Assumption: Number of cores in each internal node is the same.
64*4882a593Smuzhiyun  * (2) Hygon processors supporting compute units
65*4882a593Smuzhiyun  */
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		/* ECX[7:0]: node ID of this logical CPU. */
		c->cpu_die_id  = ecx & 0xff;

		/* EBX[7:0]: core ID of this logical CPU. */
		c->cpu_core_id = ebx & 0xff;

		/* x86_max_cores so far counts threads; reduce to real cores. */
		if (smp_num_siblings > 1)
			c->x86_max_cores /= smp_num_siblings;

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		/* Socket ID is ApicId[6] for these processors. */
		c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;

		cacheinfo_hygon_init_llc_id(c, cpu);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		/* No TOPOEXT: fall back to the NodeId MSR, bits [2:0]. */
		rdmsrl(MSR_FAM10H_NODE_ID, value);
		c->cpu_die_id = value & 7;

		/* Use the node ID as the last level cache ID too. */
		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
	} else
		return;

	/* Multi-node socket: advertise the "dual core module" capability. */
	if (nodes_per_socket > 1)
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
}
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun /*
111*4882a593Smuzhiyun  * On Hygon setup the lower bits of the APIC id distinguish the cores.
112*4882a593Smuzhiyun  * Assumes number of cores is a power of two.
113*4882a593Smuzhiyun  */
hygon_detect_cmp(struct cpuinfo_x86 * c)114*4882a593Smuzhiyun static void hygon_detect_cmp(struct cpuinfo_x86 *c)
115*4882a593Smuzhiyun {
116*4882a593Smuzhiyun 	unsigned int bits;
117*4882a593Smuzhiyun 	int cpu = smp_processor_id();
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	bits = c->x86_coreid_bits;
120*4882a593Smuzhiyun 	/* Low order bits define the core id (index of core in socket) */
121*4882a593Smuzhiyun 	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
122*4882a593Smuzhiyun 	/* Convert the initial APIC ID into the socket ID */
123*4882a593Smuzhiyun 	c->phys_proc_id = c->initial_apicid >> bits;
124*4882a593Smuzhiyun 	/* use socket ID also for last level cache */
125*4882a593Smuzhiyun 	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun 
/*
 * Pick a NUMA node for the current CPU and install it via numa_set_node(),
 * working around firmware that left the CPU without a usable node.
 */
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->apicid;

	/* Start from whatever NUMA mapping already exists for this CPU. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}
177*4882a593Smuzhiyun 
/* Decode core count and core ID bit width from CPUID Fn8000_0008. */
static void early_init_hygon_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int ecx, shift;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	/* ECX[7:0] is the core count minus one. */
	c->x86_max_cores = (ecx & 0xff) + 1;

	/* ECX[15:12] is ApicIdCoreIdSize; zero means "not reported". */
	shift = (ecx >> 12) & 0xF;
	if (shift == 0) {
		/* Not reported: derive the width from the core count. */
		while ((1 << shift) < c->x86_max_cores)
			shift++;
	}

	c->x86_coreid_bits = shift;
#endif
}
203*4882a593Smuzhiyun 
/* One-time setup run only on the boot CPU. */
static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	unsigned long long tseg;

	/*
	 * Split up direct mapping around the TSEG SMM area.
	 * Don't do it for gbpages because there seems very little
	 * benefit in doing so.
	 */
	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
		unsigned long pfn = tseg >> PAGE_SHIFT;

		pr_debug("tseg: %010llx\n", tseg);
		if (pfn_range_is_mapped(pfn, pfn + 1))
			set_memory_4k((unsigned long)__va(tseg), 1);
	}
#endif

	/* With constant TSC, HWCR bit 24 clear means the TSC won't tick at P0. */
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		u64 val;

		rdmsrl(MSR_K7_HWCR, val);
		if (!(val & BIT(24)))
			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
	}

	/* Prefer MWAITX-based delay loops when the CPU supports them. */
	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	/* Nodes per socket: CPUID Fn8000_001E ECX[10:8], else NodeId MSR. */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			/* LS_CFG bit 10 is the SSBD control on these parts. */
			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
		}
	}
}
259*4882a593Smuzhiyun 
/* Early per-CPU init: runs before the full init_hygon() pass. */
static void early_init_hygon(struct cpuinfo_x86 *c)
{
	u32 dummy;

	/* Derive core count and core ID bit width first. */
	early_init_hygon_mc(c);

	set_cpu_cap(c, X86_FEATURE_K8);

	/* Cache the microcode patch level; safe read in case MSR is absent. */
	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for Hygon APIC So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC))
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* Seed smp_num_siblings from CPUID Fn8000_001E when TOPOEXT exists. */
	hygon_get_topology_early(c);
}
305*4882a593Smuzhiyun 
/* Main per-CPU init hook: feature flags, topology, NUMA node, caches. */
static void init_hygon(struct cpuinfo_x86 *c)
{
	early_init_hygon(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/*
	 * XXX someone from Hygon needs to confirm this DTRT
	 *
	init_spectral_chicken(c);
	 */

	set_cpu_cap(c, X86_FEATURE_ZEN);
	set_cpu_cap(c, X86_FEATURE_CPB);

	cpu_detect_cache_sizes(c);

	/* Derive core/socket IDs, refine the topology, then pick a node. */
	hygon_detect_cmp(c);
	hygon_get_topology(c);
	srat_detect_node(c);

	init_hygon_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Hygon processors have APIC timer running in deep C states.
	 */
	set_cpu_cap(c, X86_FEATURE_ARAT);

	/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	check_null_seg_clears_base(c);
}
363*4882a593Smuzhiyun 
/* Fill in the global TLB entry-count tables from CPUID Fn8000_0005/0006. */
static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	/* L2 TLB fields in Fn8000_0006 are 12 bits wide. */
	u16 mask = 0xfff;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	/* EBX[27:16] = L2 DTLB 4K entries, EBX[11:0] = L2 ITLB 4K entries. */
	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* NOTE: this cpuid() clobbers the Fn8000_0006 values above. */
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		tlb_lli_2m[ENTRIES] = eax & 0xff;
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	/* As with the DTLB, a 4M ITLB entry uses two 2M entries. */
	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
395*4882a593Smuzhiyun 
/*
 * Vendor hook table, selected at boot via the "HygonGenuine" CPUID
 * vendor string and registered with the common x86 CPU setup code.
 */
static const struct cpu_dev hygon_cpu_dev = {
	.c_vendor	= "Hygon",
	.c_ident	= { "HygonGenuine" },
	.c_early_init   = early_init_hygon,
	.c_detect_tlb	= cpu_detect_tlb_hygon,
	.c_bsp_init	= bsp_init_hygon,
	.c_init		= init_hygon,
	.c_x86_vendor	= X86_VENDOR_HYGON,
};

cpu_dev_register(hygon_cpu_dev);
407