/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/percpu.h>

#include <asm/thread_info.h>
#include <asm/cpumask.h>

extern int smp_num_siblings;
extern unsigned int num_processors;

DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}
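
/*
 * Illustrative sketch (not part of the original header): walking the
 * CPUs that share a last level cache with a given CPU, e.g. from code
 * that already holds the hotplug lock via cpus_read_lock():
 *
 *	int sibling;
 *
 *	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
 *		pr_info("CPU%d shares LLC with CPU%d\n", cpu, sibling);
 */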

DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
#endif

struct task_struct;

struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	void (*stop_other_cpus)(int wait);
	void (*crash_stop_other_cpus)(void);
	void (*smp_send_reschedule)(int cpu);

	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};
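
/*
 * Illustrative sketch (not part of the original header): a paravirt
 * guest can replace the native operations wholesale during early
 * setup. The hv_* names below are hypothetical; the pattern mirrors
 * what real backends such as Xen do:
 *
 *	static const struct smp_ops hv_smp_ops __initconst = {
 *		.smp_prepare_boot_cpu	= hv_smp_prepare_boot_cpu,
 *		.smp_prepare_cpus	= hv_smp_prepare_cpus,
 *		.smp_send_reschedule	= hv_send_reschedule,
 *		.send_call_func_ipi	= hv_send_call_func_ipi,
 *		.send_call_func_single_ipi = hv_send_call_func_single_ipi,
 *	};
 *
 *	smp_ops = hv_smp_ops;
 */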

/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
extern struct smp_ops smp_ops;

static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);
}

static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);
}
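
/*
 * Both wrappers above funnel into the same stop_other_cpus() op; the
 * flag tells the implementation whether the caller needs to wait until
 * the other CPUs have really stopped (1) or not (0).
 */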

static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}

static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	return smp_ops.cpu_up(cpu, tidle);
}

static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}

static inline void play_dead(void)
{
	smp_ops.play_dead();
}

static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);
}

void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void calculate_max_logical_packages(void);
void native_smp_cpus_done(unsigned int max_cpus);
int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu);
void hlt_play_dead(void);
void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);
void cond_wakeup_cpu0(void);

void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);

void smp_store_boot_cpu_info(void);
void smp_store_cpu_info(int id);

asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
__visible void smp_call_function_interrupt(struct pt_regs *regs);
__visible void smp_call_function_single_interrupt(struct pt_regs *r);

#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
#define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)
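
/*
 * The two macros above map a Linux CPU number to its hardware
 * identifiers: the local APIC ID and the ACPI processor id reported
 * by the firmware.
 */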

/*
 * These macros are needed by all SMP systems. They must _always_ be
 * valid from the initial startup. We map APIC_BASE very early in
 * page_setup(), so this is correct in the x86 case.
 */
#define raw_smp_processor_id() this_cpu_read(cpu_number)
#define __smp_processor_id() __this_cpu_read(cpu_number)
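
/*
 * Both read the per-CPU cpu_number set up at boot: raw_smp_processor_id()
 * goes through this_cpu_read(), while __smp_processor_id() uses the
 * __this_cpu_read() form for callers that already run with preemption
 * disabled.
 */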

#ifdef CONFIG_X86_32
extern int safe_smp_processor_id(void);
#else
# define safe_smp_processor_id()	smp_processor_id()
#endif

#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu)     wbinvd()
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}
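
/*
 * On UP kernels there is nobody else to flush: wbinvd_on_cpu() and
 * wbinvd_on_all_cpus() simply write back and invalidate the local
 * caches via wbinvd().
 */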
#endif /* CONFIG_SMP */

extern unsigned disabled_cpus;

#ifdef CONFIG_X86_LOCAL_APIC
extern int hard_smp_processor_id(void);

#else /* CONFIG_X86_LOCAL_APIC */
#define hard_smp_processor_id()	0
#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_DEBUG_NMI_SELFTEST
extern void nmi_selftest(void);
#else
#define nmi_selftest() do { } while (0)
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */