xref: /OK3568_Linux_fs/kernel/arch/sh/include/asm/smp.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __ASM_SH_SMP_H
3*4882a593Smuzhiyun #define __ASM_SH_SMP_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/bitops.h>
6*4882a593Smuzhiyun #include <linux/cpumask.h>
7*4882a593Smuzhiyun #include <asm/smp-ops.h>
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #ifdef CONFIG_SMP
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <linux/atomic.h>
12*4882a593Smuzhiyun #include <asm/current.h>
13*4882a593Smuzhiyun #include <asm/percpu.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #define raw_smp_processor_id()	(current_thread_info()->cpu)
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun /* Map from cpu id to sequential logical cpu number. */
18*4882a593Smuzhiyun extern int __cpu_number_map[NR_CPUS];
19*4882a593Smuzhiyun #define cpu_number_map(cpu)  __cpu_number_map[cpu]
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun /* The reverse map from sequential logical cpu number to cpu id.  */
22*4882a593Smuzhiyun extern int __cpu_logical_map[NR_CPUS];
23*4882a593Smuzhiyun #define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
24*4882a593Smuzhiyun 
/*
 * Inter-processor interrupt (IPI) message types handled by
 * smp_message_recv().  SMP_MSG_NR doubles as the count of message
 * types and therefore must remain the last entry.
 */
enum {
	SMP_MSG_FUNCTION,		/* cross-CPU function-call IPI (see arch_send_call_function_ipi_mask) */
	SMP_MSG_RESCHEDULE,		/* ask a remote CPU to reschedule */
	SMP_MSG_FUNCTION_SINGLE,	/* single-target function-call IPI (see arch_send_call_function_single_ipi) */
	SMP_MSG_TIMER,			/* timer message — presumably broadcast tick; confirm in smp_message_recv() */

	SMP_MSG_NR,	/* must be last */
};
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun DECLARE_PER_CPU(int, cpu_state);
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun void smp_message_recv(unsigned int msg);
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun void arch_send_call_function_single_ipi(int cpu);
39*4882a593Smuzhiyun void arch_send_call_function_ipi_mask(const struct cpumask *mask);
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun void native_play_dead(void);
42*4882a593Smuzhiyun void native_cpu_die(unsigned int cpu);
43*4882a593Smuzhiyun int native_cpu_disable(unsigned int cpu);
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #ifdef CONFIG_HOTPLUG_CPU
46*4882a593Smuzhiyun void play_dead_common(void);
47*4882a593Smuzhiyun extern int __cpu_disable(void);
48*4882a593Smuzhiyun 
__cpu_die(unsigned int cpu)49*4882a593Smuzhiyun static inline void __cpu_die(unsigned int cpu)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun 	extern struct plat_smp_ops *mp_ops;     /* private */
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	mp_ops->cpu_die(cpu);
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun #endif
56*4882a593Smuzhiyun 
hard_smp_processor_id(void)57*4882a593Smuzhiyun static inline int hard_smp_processor_id(void)
58*4882a593Smuzhiyun {
59*4882a593Smuzhiyun 	extern struct plat_smp_ops *mp_ops;	/* private */
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 	if (!mp_ops)
62*4882a593Smuzhiyun 		return 0;	/* boot CPU */
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 	return mp_ops->smp_processor_id();
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun 
/*
 * Pairs a CPU bring-up method name (presumably a device-tree
 * enable-method string — confirm against the table's consumer) with
 * the plat_smp_ops implementing it.  Instances are gathered into the
 * "__cpu_method_of_table" section by CPU_METHOD_OF_DECLARE().
 */
struct of_cpu_method {
	const char *method;		/* method name to match */
	struct plat_smp_ops *ops;	/* ops implementing that method */
};
71*4882a593Smuzhiyun 
/*
 * Register a struct of_cpu_method: emits a static table entry into the
 * "__cpu_method_of_table" linker section (kept alive by __used) so it
 * can be discovered by iterating that section at boot.
 */
#define CPU_METHOD_OF_DECLARE(name, _method, _ops)			\
	static const struct of_cpu_method __cpu_method_of_table_##name	\
		__used __section("__cpu_method_of_table")		\
		= { .method = _method, .ops = _ops }
77*4882a593Smuzhiyun #else
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun #define hard_smp_processor_id()	(0)
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun #endif /* CONFIG_SMP */
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun #endif /* __ASM_SH_SMP_H */
84