/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp_types.h>

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
	union {
		struct __call_single_node node;
		struct {
			struct llist_node llist;
			unsigned int flags;
#ifdef CONFIG_64BIT
			u16 src, dst;
#endif
		};
	};
	smp_call_func_t func;
	void *info;
};

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

/*
 * Enqueue an llist_node on the call_single_queue; be very careful, read
 * flush_smp_call_function_queue() in detail.
 */
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);
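
/*
 * Usage sketch (illustrative, not part of the original header): run a
 * callback on one specific CPU and wait for it to return. The callback
 * executes in IPI context on the target CPU and must not sleep. The
 * helper and per-CPU variable below are hypothetical.
 *
 *	static DEFINE_PER_CPU(u64, event_count);	// hypothetical
 *
 *	static void read_remote_count(void *info)	// hypothetical
 *	{
 *		*(u64 *)info = this_cpu_read(event_count);
 *	}
 *
 *	u64 val;
 *	int ret = smp_call_function_single(2, read_remote_count, &val, 1);
 *	// ret is 0 on success, or an error (e.g. -ENXIO) if CPU 2 is offline
 */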

/*
 * Call a function on all processors
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait);

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		void *info, bool wait);
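
/*
 * Usage sketch (illustrative): broadcast the same callback to every
 * online CPU, or only to the CPUs in a given mask. wait selects whether
 * the caller blocks until all callbacks have completed. The callback
 * and mask names are hypothetical; the IPI-context rules above apply.
 *
 *	static void drain_local_buffer(void *info)	// hypothetical
 *	{
 *		// flush this CPU's private slice of *info
 *	}
 *
 *	on_each_cpu(drain_local_buffer, buf, 1);
 *	on_each_cpu_mask(my_mask, drain_local_buffer, buf, true);
 */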

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true. This may include the local processor.
 */
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait);

void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);
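
/*
 * Usage sketch (illustrative): only send an IPI to CPUs that actually
 * have pending work. cond_func runs on the calling CPU once per
 * candidate CPU; CPUs for which it returns false are skipped. The
 * per-CPU counter and both callbacks are hypothetical.
 *
 *	static bool cpu_has_pending(int cpu, void *info)	// hypothetical
 *	{
 *		return per_cpu(pending_count, cpu) != 0;
 *	}
 *
 *	on_each_cpu_cond(cpu_has_pending, drain_local_buffer, NULL, true);
 */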

int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
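
/*
 * Usage sketch (illustrative): a fire-and-forget cross call with a
 * caller-owned csd, e.g. from a context that cannot block. The csd must
 * remain valid until the callback has finished; re-submitting it before
 * then fails with -EBUSY. Names are hypothetical.
 *
 *	static void kick_remote(void *unused)		// hypothetical
 *	{
 *		// runs in IPI context on the target CPU
 *	}
 *
 *	static call_single_data_t kick_csd = {
 *		.func = kick_remote,
 *	};
 *
 *	smp_call_function_single_async(target_cpu, &kick_csd);
 */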

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handling INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);
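
/*
 * Usage sketch (illustrative): unlike on_each_cpu(), these run func on
 * every CPU in the set except the calling CPU, while
 * smp_call_function_any() picks a single CPU from the mask, preferring
 * the current one to avoid an IPI. Reuses the hypothetical callback
 * from the sketch above.
 *
 *	smp_call_function(drain_local_buffer, buf, 1);	// all other CPUs
 *	smp_call_function_any(cpu_online_mask, drain_local_buffer, buf, 1);
 */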

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);
void wake_up_all_online_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }
static inline void wake_up_all_online_idle_cpus(void) { }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions
 * and WARN when smp_processor_id() is used while the CPU id is not
 * stable.
 */

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
 * regular asm read for the stable.
 */
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()
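
/*
 * Usage sketch (illustrative): get_cpu() disables preemption and returns
 * the now-stable CPU id; put_cpu() re-enables preemption. Keep the
 * critical section short. The per-CPU work below is hypothetical.
 *
 *	int cpu = get_cpu();
 *	update_local_stats(cpu);	// hypothetical; preemption is off
 *	put_cpu();
 */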

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);
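
/*
 * Usage sketch (illustrative): unlike the IPI-based calls above,
 * smp_call_on_cpu() runs func in task context on the requested CPU (via
 * a work item), so func may sleep; the caller blocks and receives func's
 * return value. The callback is hypothetical.
 *
 *	static int do_slow_setup(void *arg)		// hypothetical
 *	{
 *		// may sleep; runs on the requested CPU
 *		return 0;
 *	}
 *
 *	int ret = smp_call_on_cpu(cpu, do_slow_setup, NULL, false);
 */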

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */