1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2012 ARM Ltd.
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun #ifndef __ASM_SMP_H
6*4882a593Smuzhiyun #define __ASM_SMP_H
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/const.h>
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun /* Values for secondary_data.status */
11*4882a593Smuzhiyun #define CPU_STUCK_REASON_SHIFT (8)
12*4882a593Smuzhiyun #define CPU_BOOT_STATUS_MASK ((UL(1) << CPU_STUCK_REASON_SHIFT) - 1)
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #define CPU_MMU_OFF (-1)
15*4882a593Smuzhiyun #define CPU_BOOT_SUCCESS (0)
16*4882a593Smuzhiyun /* The cpu invoked ops->cpu_die, synchronise it with cpu_kill */
17*4882a593Smuzhiyun #define CPU_KILL_ME (1)
18*4882a593Smuzhiyun /* The cpu couldn't die gracefully and is looping in the kernel */
19*4882a593Smuzhiyun #define CPU_STUCK_IN_KERNEL (2)
20*4882a593Smuzhiyun /* Fatal system error detected by secondary CPU, crash the system */
21*4882a593Smuzhiyun #define CPU_PANIC_KERNEL (3)
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun #define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT)
24*4882a593Smuzhiyun #define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT)
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #ifndef __ASSEMBLY__
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #include <asm/percpu.h>
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun #include <linux/threads.h>
31*4882a593Smuzhiyun #include <linux/cpumask.h>
32*4882a593Smuzhiyun #include <linux/thread_info.h>
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun /*
37*4882a593Smuzhiyun * We don't use this_cpu_read(cpu_number) as that has implicit writes to
38*4882a593Smuzhiyun * preempt_count, and associated (compiler) barriers, that we'd like to avoid
39*4882a593Smuzhiyun * the expense of. If we're preemptible, the value can be stale at use anyway.
40*4882a593Smuzhiyun * And we can't use this_cpu_ptr() either, as that winds up recursing back
41*4882a593Smuzhiyun * here under CONFIG_DEBUG_PREEMPT=y.
42*4882a593Smuzhiyun */
43*4882a593Smuzhiyun #define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun /*
46*4882a593Smuzhiyun * Logical CPU mapping.
47*4882a593Smuzhiyun */
48*4882a593Smuzhiyun extern u64 __cpu_logical_map[NR_CPUS];
49*4882a593Smuzhiyun extern u64 cpu_logical_map(unsigned int cpu);
50*4882a593Smuzhiyun
set_cpu_logical_map(unsigned int cpu,u64 hwid)51*4882a593Smuzhiyun static inline void set_cpu_logical_map(unsigned int cpu, u64 hwid)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun __cpu_logical_map[cpu] = hwid;
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun struct seq_file;
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun /*
59*4882a593Smuzhiyun * Discover the set of possible CPUs and determine their
60*4882a593Smuzhiyun * SMP operations.
61*4882a593Smuzhiyun */
62*4882a593Smuzhiyun extern void smp_init_cpus(void);
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun /*
65*4882a593Smuzhiyun * Register IPI interrupts with the arch SMP code
66*4882a593Smuzhiyun */
67*4882a593Smuzhiyun extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun /*
70*4882a593Smuzhiyun * Called from the secondary holding pen, this is the secondary CPU entry point.
71*4882a593Smuzhiyun */
72*4882a593Smuzhiyun asmlinkage void secondary_start_kernel(void);
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun /*
75*4882a593Smuzhiyun * Initial data for bringing up a secondary CPU.
76*4882a593Smuzhiyun * @stack - sp for the secondary CPU
77*4882a593Smuzhiyun * @status - Result passed back from the secondary CPU to
78*4882a593Smuzhiyun * indicate failure.
79*4882a593Smuzhiyun */
80*4882a593Smuzhiyun struct secondary_data {
81*4882a593Smuzhiyun void *stack;
82*4882a593Smuzhiyun struct task_struct *task;
83*4882a593Smuzhiyun long status;
84*4882a593Smuzhiyun };
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun extern struct secondary_data secondary_data;
87*4882a593Smuzhiyun extern long __early_cpu_boot_status;
88*4882a593Smuzhiyun extern void secondary_entry(void);
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun extern void arch_send_call_function_single_ipi(int cpu);
91*4882a593Smuzhiyun extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
92*4882a593Smuzhiyun extern int nr_ipi_get(void);
93*4882a593Smuzhiyun extern struct irq_desc **ipi_desc_get(void);
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
96*4882a593Smuzhiyun extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
97*4882a593Smuzhiyun #else
arch_send_wakeup_ipi_mask(const struct cpumask * mask)98*4882a593Smuzhiyun static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun BUILD_BUG();
101*4882a593Smuzhiyun }
102*4882a593Smuzhiyun #endif
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun extern int __cpu_disable(void);
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun extern void __cpu_die(unsigned int cpu);
107*4882a593Smuzhiyun extern void cpu_die(void);
108*4882a593Smuzhiyun extern void cpu_die_early(void);
109*4882a593Smuzhiyun
cpu_park_loop(void)110*4882a593Smuzhiyun static inline void cpu_park_loop(void)
111*4882a593Smuzhiyun {
112*4882a593Smuzhiyun for (;;) {
113*4882a593Smuzhiyun wfe();
114*4882a593Smuzhiyun wfi();
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun }
117*4882a593Smuzhiyun
update_cpu_boot_status(int val)118*4882a593Smuzhiyun static inline void update_cpu_boot_status(int val)
119*4882a593Smuzhiyun {
120*4882a593Smuzhiyun WRITE_ONCE(secondary_data.status, val);
121*4882a593Smuzhiyun /* Ensure the visibility of the status update */
122*4882a593Smuzhiyun dsb(ishst);
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun /*
126*4882a593Smuzhiyun * The calling secondary CPU has detected serious configuration mismatch,
127*4882a593Smuzhiyun * which calls for a kernel panic. Update the boot status and park the calling
128*4882a593Smuzhiyun * CPU.
129*4882a593Smuzhiyun */
cpu_panic_kernel(void)130*4882a593Smuzhiyun static inline void cpu_panic_kernel(void)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun update_cpu_boot_status(CPU_PANIC_KERNEL);
133*4882a593Smuzhiyun cpu_park_loop();
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun /*
137*4882a593Smuzhiyun * If a secondary CPU enters the kernel but fails to come online,
138*4882a593Smuzhiyun * (e.g. due to mismatched features), and cannot exit the kernel,
139*4882a593Smuzhiyun * we increment cpus_stuck_in_kernel and leave the CPU in a
140*4882a593Smuzhiyun * quiesecent loop within the kernel text. The memory containing
141*4882a593Smuzhiyun * this loop must not be re-used for anything else as the 'stuck'
142*4882a593Smuzhiyun * core is executing it.
143*4882a593Smuzhiyun *
144*4882a593Smuzhiyun * This function is used to inhibit features like kexec and hibernate.
145*4882a593Smuzhiyun */
146*4882a593Smuzhiyun bool cpus_are_stuck_in_kernel(void);
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun extern void crash_smp_send_stop(void);
149*4882a593Smuzhiyun extern bool smp_crash_stop_failed(void);
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun #endif /* ifndef __ASSEMBLY__ */
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun #endif /* ifndef __ASM_SMP_H */
154