/* SPDX-License-Identifier: GPL-2.0 */
/* smp.h: Sparc64 specific SMP stuff.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 */

#ifndef _SPARC64_SMP_H
#define _SPARC64_SMP_H

#include <linux/threads.h>
#include <asm/asi.h>
#include <asm/starfire.h>
#include <asm/spitfire.h>

#ifndef __ASSEMBLY__

#include <linux/cpumask.h>
#include <linux/cache.h>

#endif /* !(__ASSEMBLY__) */

#ifdef CONFIG_SMP

#ifndef __ASSEMBLY__

/*
 *	Private routines/data
 */

#include <linux/bitops.h>
#include <linux/atomic.h>
#include <asm/percpu.h>

DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
extern cpumask_t cpu_core_map[NR_CPUS];

void smp_init_cpu_poke(void);
void scheduler_poke(void);

/* Arch hooks used by the generic smp_call_function*() IPI code. */
void arch_send_call_function_single_ipi(int cpu);
void arch_send_call_function_ipi_mask(const struct cpumask *mask);

/*
 *	General functions that each host system must provide.
 */

int hard_smp_processor_id(void);
#define raw_smp_processor_id() (current_thread_info()->cpu)

void smp_fill_in_cpu_possible_map(void);
void smp_fill_in_sib_core_maps(void);
void cpu_play_dead(void);

void smp_fetch_global_regs(void);
void smp_fetch_global_pmu(void);

struct seq_file;
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);

void smp_callin(void);
void cpu_panic(void);
void smp_synchronize_tick_client(void);
void smp_capture(void);
void smp_release(void);

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void);
void __cpu_die(unsigned int cpu);
#endif

#endif /* !(__ASSEMBLY__) */

#else

/* Uniprocessor build: stub out the SMP-only interfaces. */
#define hard_smp_processor_id()		0
#define smp_fill_in_sib_core_maps() do { } while (0)
#define smp_fetch_global_regs() do { } while (0)
#define smp_fetch_global_pmu() do { } while (0)
#define smp_fill_in_cpu_possible_map() do { } while (0)
#define smp_init_cpu_poke() do { } while (0)
#define scheduler_poke() do { } while (0)

#endif /* !(CONFIG_SMP) */

#endif /* !(_SPARC64_SMP_H) */