// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

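/*
 * Record the sibling (hardware thread) relationships of @cpu: every CPU
 * already in cpu_sibling_setup_map that shares a core with @cpu is added
 * to its sibling map, and vice versa.
 */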
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

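/*
 * Record the package-level topology of @cpu: CPUs residing in the same
 * physical package are marked as part of each other's core map.
 */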
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
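/*
 * The map is built in two passes: first pick exactly one VPE from each
 * online core, then give each CPU that selection minus its own siblings,
 * so cpu_foreign_map[cpu] names one representative VPE on every other core.
 */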
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

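	/*
	 * A target CPU on another, possibly powered-down core cannot take
	 * the IPI yet; keep issuing a CPC power-up command for that core
	 * until the CPU shows up in cpu_coherent_mask.
	 */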
	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}


static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static void smp_ipi_init_one(unsigned int virq, const char *name,
			     irq_handler_t handler)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

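/*
 * Reserve & wire up the call-function and reschedule IPIs for the CPUs in
 * @mask, looking up the IPI IRQ domain from the device tree where one is
 * described.
 */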
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup, so if we found an irq
	 * node but no matching ipidomain, try to search for one that is not
	 * described in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs, so
	 * fail loudly if that is the case. Otherwise simply return, skipping
	 * IPI setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

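	/*
	 * Reserve one virq (a per-cpu range on per-cpu capable IPI domains)
	 * for each of the two IPI types.
	 */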
	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, "IPI call",
					 ipi_call_interrupt);
			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
					 ipi_resched_interrupt);
		}
	} else {
		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
		smp_ipi_init_one(sched_virq, "IPI resched",
				 ipi_resched_interrupt);
	}

	return 0;
}

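/*
 * Release the call-function and reschedule IPIs previously allocated for
 * the CPUs in @mask.
 */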
int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup, so if we found an irq
	 * node but no matching ipidomain, try to search for one that is not
	 * described in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			free_irq(call_virq + cpu, NULL);
			free_irq(sched_virq + cpu, NULL);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}


static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	if (mp_ops->prepare_boot_cpu)
		mp_ops->prepare_boot_cpu();
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

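/*
 * With MMID support a single global invalidate (ginvt) is broadcast to all
 * coherent CPUs, so no cross-CPU IPIs are needed; otherwise fall back to an
 * IPI telling every CPU to flush its own TLB.
 */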
void flush_tlb_all(void)
{
	if (cpu_has_mmid) {
		htw_stop();
		ginvt_full();
		sync_ginv();
		instruction_hazard();
		htw_start();
		return;
	}

	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, mm));
		mtc0_tlbw_hazard();
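		/*
		 * Each TLB entry maps an even/odd pair of pages, so step the
		 * global invalidate in double-page strides that cover the
		 * whole of [start, end).
		 */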
		addr = round_down(start, PAGE_SIZE * 2);
		end = round_up(end, PAGE_SIZE * 2);
		do {
			ginvt_va_mmid(addr);
			sync_ginv();
			addr += PAGE_SIZE * 2;
		} while (addr < end);
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
		local_flush_tlb_range(vma, start, end);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * the ASID so that has_valid_asid() treats the mm as
			 * completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, !exec);
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

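/*
 * Flush a single page: via a globalized invalidate when MMIDs are
 * supported, via IPI when other CPUs might be using the mm, or by lazily
 * invalidating the stale ASIDs on the other CPUs otherwise.
 */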
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
		   (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
		local_flush_tlb_page(vma, page);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate the ASID so that has_valid_asid() treats
			 * the mm as completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				set_cpu_context(cpu, vma->vm_mm, 1);
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

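/*
 * One call_single_data_t per CPU: smp_call_function_single_async() requires
 * that a csd not be reused while a previous call on it is still in flight,
 * so broadcasts to several CPUs must not share a single csd.
 */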
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);

void tick_broadcast(const struct cpumask *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	tick_receive_broadcast();
}

static int __init tick_broadcast_init(void)
{
	call_single_data_t *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */