// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for power macintosh.
 *
 * We support both the old "powersurge" SMP architecture
 * and the current Core99 (G4 PowerMac) machines.
 *
 * Note that we don't support the very first rev. of
 * Apple/DayStar 2 CPUs board, the one with the funky
 * watchdog. Hopefully, none of these should be there except
 * maybe internally to Apple. I should probably still add some
 * code to detect this card though and disable SMP. --BenH.
 *
 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
 *
 * Support for DayStar quad CPU cards
 * Copyright (C) XLR8, Inc. 1994-2000
 */
20*4882a593Smuzhiyun #include <linux/kernel.h>
21*4882a593Smuzhiyun #include <linux/sched.h>
22*4882a593Smuzhiyun #include <linux/sched/hotplug.h>
23*4882a593Smuzhiyun #include <linux/smp.h>
24*4882a593Smuzhiyun #include <linux/interrupt.h>
25*4882a593Smuzhiyun #include <linux/kernel_stat.h>
26*4882a593Smuzhiyun #include <linux/delay.h>
27*4882a593Smuzhiyun #include <linux/init.h>
28*4882a593Smuzhiyun #include <linux/spinlock.h>
29*4882a593Smuzhiyun #include <linux/errno.h>
30*4882a593Smuzhiyun #include <linux/hardirq.h>
31*4882a593Smuzhiyun #include <linux/cpu.h>
32*4882a593Smuzhiyun #include <linux/compiler.h>
33*4882a593Smuzhiyun #include <linux/pgtable.h>
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #include <asm/ptrace.h>
36*4882a593Smuzhiyun #include <linux/atomic.h>
37*4882a593Smuzhiyun #include <asm/code-patching.h>
38*4882a593Smuzhiyun #include <asm/irq.h>
39*4882a593Smuzhiyun #include <asm/page.h>
40*4882a593Smuzhiyun #include <asm/sections.h>
41*4882a593Smuzhiyun #include <asm/io.h>
42*4882a593Smuzhiyun #include <asm/prom.h>
43*4882a593Smuzhiyun #include <asm/smp.h>
44*4882a593Smuzhiyun #include <asm/machdep.h>
45*4882a593Smuzhiyun #include <asm/pmac_feature.h>
46*4882a593Smuzhiyun #include <asm/time.h>
47*4882a593Smuzhiyun #include <asm/mpic.h>
48*4882a593Smuzhiyun #include <asm/cacheflush.h>
49*4882a593Smuzhiyun #include <asm/keylargo.h>
50*4882a593Smuzhiyun #include <asm/pmac_low_i2c.h>
51*4882a593Smuzhiyun #include <asm/pmac_pfunc.h>
52*4882a593Smuzhiyun #include <asm/inst.h>
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun #include "pmac.h"
55*4882a593Smuzhiyun
#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* Entry point that secondary CPUs begin executing at (defined outside
 * this file, in low-level assembly). */
extern void __secondary_start_pmac_0(void);

/* Platform-specific hook to freeze (1) / unfreeze (0) the timebase
 * during CPU bringup; selected at probe/setup time. */
static void (*pmac_tb_freeze)(int freeze);
/* Mailbox for the timebase value handed from primary to secondary. */
static u64 timebase;
/* Handshake flag: a secondary sets it to request the primary's TB. */
static int tb_req;

#ifdef CONFIG_PPC_PMAC32_PSURGE

/*
 * Powersurge (old powermac SMP) support.
 */

/* Addresses for powersurge registers */
#define HAMMERHEAD_BASE		0xf8000000
#define HHEAD_CONFIG		0x90
#define HHEAD_SEC_INTR		0xc0

/* register for interrupting the primary processor on the powersurge */
/* N.B. this is actually the ethernet ROM! */
#define PSURGE_PRI_INTR		0xf3019000

/* register for storing the start address for the secondary processor */
/* N.B. this is the PCI config space address register for the 1st bridge */
#define PSURGE_START		0xf2800000

/* Daystar/XLR8 4-CPU card */
#define PSURGE_QUAD_REG_ADDR	0xf8800000

/* Register indices within the quad controller */
#define PSURGE_QUAD_IRQ_SET	0
#define PSURGE_QUAD_IRQ_CLR	1
#define PSURGE_QUAD_IRQ_PRIMARY	2
#define PSURGE_QUAD_CKSTOP_CTL	3
#define PSURGE_QUAD_PRIMARY_ARB	4
#define PSURGE_QUAD_BOARD_ID	6
#define PSURGE_QUAD_WHICH_CPU	7
#define PSURGE_QUAD_CKSTOP_RDBK	8
#define PSURGE_QUAD_RESET_CTL	11

/* Quad register accessors: each register lives one byte every 16 bytes
 * at offset 4; only the low nibble is meaningful on reads. */
#define PSURGE_QUAD_OUT(r, v)	(out_8(quad_base + ((r) << 4) + 4, (v)))
#define PSURGE_QUAD_IN(r)	(in_8(quad_base + ((r) << 4) + 4) & 0x0f)
#define PSURGE_QUAD_BIS(r, v)	(PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
#define PSURGE_QUAD_BIC(r, v)	(PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))

/* virtual addresses (ioremap'd) for the physical addresses above */
static volatile u8 __iomem *hhead_base;
static volatile u8 __iomem *quad_base;
static volatile u32 __iomem *psurge_pri_intr;
static volatile u8 __iomem *psurge_sec_intr;
static volatile u32 __iomem *psurge_start;

/* values for psurge_type */
#define PSURGE_NONE		-1
#define PSURGE_DUAL		0
#define PSURGE_QUAD_OKEE	1
#define PSURGE_QUAD_COTTON	2
#define PSURGE_QUAD_ICEGRASS	3

/* what sort of powersurge board we have */
static int psurge_type = PSURGE_NONE;

/* irq domain and virq used by secondary cpus to receive IPIs */
static struct irq_domain *psurge_host;
int psurge_secondary_virq;
128*4882a593Smuzhiyun /*
129*4882a593Smuzhiyun * Set and clear IPIs for powersurge.
130*4882a593Smuzhiyun */
/*
 * Raise an IPI to @cpu.  The primary (cpu 0) is interrupted by a read
 * of the magic PSURGE_PRI_INTR location (see note above: it is really
 * the ethernet ROM); on dual boards a secondary is poked through the
 * hammerhead register, on quad boards through the IRQ-set register.
 */
static inline void psurge_set_ipi(int cpu)
{
	if (psurge_type == PSURGE_NONE)
		return;
	if (cpu == 0)
		in_be32(psurge_pri_intr);
	else if (psurge_type == PSURGE_DUAL)
		out_8(psurge_sec_intr, 0);
	else
		PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
}
142*4882a593Smuzhiyun
/*
 * Clear a pending IPI for @cpu.  Only secondaries (cpu > 0) need
 * explicit clearing here.
 */
static inline void psurge_clr_ipi(int cpu)
{
	if (cpu > 0) {
		switch(psurge_type) {
		case PSURGE_DUAL:
			out_8(psurge_sec_intr, ~0);
			/* fall through -- nothing more to do */
		case PSURGE_NONE:
			break;
		default:
			PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
		}
	}
}
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun /*
158*4882a593Smuzhiyun * On powersurge (old SMP powermac architecture) we don't have
159*4882a593Smuzhiyun * separate IPIs for separate messages like openpic does. Instead
160*4882a593Smuzhiyun * use the generic demux helpers
161*4882a593Smuzhiyun * -- paulus.
162*4882a593Smuzhiyun */
/*
 * On powersurge (old SMP powermac architecture) we don't have
 * separate IPIs for separate messages like openpic does.  Instead
 * use the generic demux helpers
 * -- paulus.
 *
 * IPI interrupt handler: ack the hardware IPI for this CPU, then let
 * the generic muxed-IPI code dispatch the actual message(s).
 */
static irqreturn_t psurge_ipi_intr(int irq, void *d)
{
	psurge_clr_ipi(smp_processor_id());
	smp_ipi_demux();

	return IRQ_HANDLED;
}
170*4882a593Smuzhiyun
/* smp_ops->cause_ipi hook: just assert the hardware IPI for @cpu. */
static void smp_psurge_cause_ipi(int cpu)
{
	psurge_set_ipi(cpu);
}
175*4882a593Smuzhiyun
/*
 * irq_domain map hook: powersurge IPIs have no real irq chip, so wire
 * each virq to the dummy chip with per-cpu flow handling.
 */
static int psurge_host_map(struct irq_domain *h, unsigned int virq,
			   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);

	return 0;
}
183*4882a593Smuzhiyun
/* IRQ domain ops for the powersurge IPI domain (map only). */
static const struct irq_domain_ops psurge_host_ops = {
	.map	= psurge_host_map,
};
187*4882a593Smuzhiyun
psurge_secondary_ipi_init(void)188*4882a593Smuzhiyun static int psurge_secondary_ipi_init(void)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun int rc = -ENOMEM;
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL);
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun if (psurge_host)
195*4882a593Smuzhiyun psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun if (psurge_secondary_virq)
198*4882a593Smuzhiyun rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
199*4882a593Smuzhiyun IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL);
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun if (rc)
202*4882a593Smuzhiyun pr_err("Failed to setup secondary cpu IPI\n");
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun return rc;
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun
/*
 * Determine a quad card presence. We read the board ID register, we
 * force the data bus to change to something else, and we read it again.
 * If it's stable, then the register probably exists (ugh !)
 *
 * Returns one of the PSURGE_QUAD_* board types, or PSURGE_DUAL when no
 * quad board is detected.
 */
static int __init psurge_quad_probe(void)
{
	int type;
	unsigned int i;

	type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
	if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
	    || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
		return PSURGE_DUAL;

	/* looks OK, try a slightly more rigorous test */
	/* bogus is not necessarily cacheline-aligned,
	   though I don't suppose that really matters. -- paulus */
	for (i = 0; i < 100; i++) {
		volatile u32 bogus[8];
		/* Scribble a rotating pattern and flush it from the cache
		 * (dcbf) so the data bus carries other values between the
		 * two board-ID reads. */
		bogus[(0+i)%8] = 0x00000000;
		bogus[(1+i)%8] = 0x55555555;
		bogus[(2+i)%8] = 0xFFFFFFFF;
		bogus[(3+i)%8] = 0xAAAAAAAA;
		bogus[(4+i)%8] = 0x33333333;
		bogus[(5+i)%8] = 0xCCCCCCCC;
		bogus[(6+i)%8] = 0xCCCCCCCC;
		bogus[(7+i)%8] = 0x33333333;
		wmb();
		asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
		mb();
		if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
			return PSURGE_DUAL;
	}
	return type;
}
243*4882a593Smuzhiyun
/*
 * Bring the secondary CPUs on a quad (DayStar/XLR8) board out of their
 * held state, stepping through reset / clock-stop / arbitration control
 * with settle delays between each stage.
 */
static void __init psurge_quad_init(void)
{
	int procbits;

	if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
	/* NOTE(review): the inversion suggests WHICH_CPU reads as a mask
	 * with our own CPU's bit set, so procbits covers the other CPUs
	 * -- confirm against the quad controller docs. */
	procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
	if (psurge_type == PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	else
		PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
	mdelay(33);
	out_8(psurge_sec_intr, ~0);
	PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
	PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	if (psurge_type != PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
	PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
	PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
	mdelay(33);
	PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
}
267*4882a593Smuzhiyun
/*
 * smp_ops->probe hook: detect a powersurge SMP board (dual or quad),
 * initialise the quad controller if present, set up the secondary IPI
 * plumbing, and mark the extra CPUs present.
 */
static void __init smp_psurge_probe(void)
{
	int i, ncpus;
	struct device_node *dn;

	/*
	 * The powersurge cpu board can be used in the generation
	 * of powermacs that have a socket for an upgradeable cpu card,
	 * including the 7500, 8500, 9500, 9600.
	 * The device tree doesn't tell you if you have 2 cpus because
	 * OF doesn't know anything about the 2nd processor.
	 * Instead we look for magic bits in magic registers,
	 * in the hammerhead memory controller in the case of the
	 * dual-cpu powersurge board. -- paulus.
	 */
	dn = of_find_node_by_name(NULL, "hammerhead");
	if (dn == NULL)
		return;
	of_node_put(dn);

	hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
	quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
	psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

	psurge_type = psurge_quad_probe();
	if (psurge_type != PSURGE_DUAL) {
		psurge_quad_init();
		/* All released cards using this HW design have 4 CPUs */
		ncpus = 4;
		/* No sure how timebase sync works on those, let's use SW */
		smp_ops->give_timebase = smp_generic_give_timebase;
		smp_ops->take_timebase = smp_generic_take_timebase;
	} else {
		iounmap(quad_base);
		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
			/* not a dual-cpu card */
			iounmap(hhead_base);
			psurge_type = PSURGE_NONE;
			return;
		}
		ncpus = 2;
	}

	if (psurge_secondary_ipi_init())
		return;

	psurge_start = ioremap(PSURGE_START, 4);
	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

	/* This is necessary because OF doesn't know about the
	 * secondary cpu(s), and thus there aren't nodes in the
	 * device tree for them, and smp_setup_cpu_maps hasn't
	 * set their bits in cpu_present_mask.
	 */
	if (ncpus > NR_CPUS)
		ncpus = NR_CPUS;
	for (i = 1; i < ncpus ; ++i)
		set_cpu_present(i, true);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
}
329*4882a593Smuzhiyun
/*
 * smp_ops->kick_cpu hook: start secondary CPU @nr.
 *
 * Flushes the low kernel image from the data cache, writes the
 * secondary entry point into the PSURGE_START register and pulses an
 * IPI to wake the CPU.  While the secondary starts, the timebase is
 * frozen, so all waiting below is done with nop loops rather than
 * udelay(), and for dual-CPU cards the timebase handoff to the
 * secondary is completed here before returning.  Always returns 0.
 */
static int __init smp_psurge_kick_cpu(int nr)
{
	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
	unsigned long a, flags;
	int i, j;

	/* Defining this here is evil ... but I prefer hiding that
	 * crap to avoid giving people ideas that they can do the
	 * same.
	 */
	extern volatile unsigned int cpu_callin_map[NR_CPUS];

	/* may need to flush here if secondary bats aren't setup */
	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
		asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
	asm volatile("sync");

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

	/* This is going to freeze the timebase, we disable interrupts */
	local_irq_save(flags);

	out_be32(psurge_start, start);
	mb();

	psurge_set_ipi(nr);

	/*
	 * We can't use udelay here because the timebase is now frozen.
	 */
	for (i = 0; i < 2000; ++i)
		asm volatile("nop" : : : "memory");
	psurge_clr_ipi(nr);

	/*
	 * Also, because the timebase is frozen, we must not return to the
	 * caller which will try to do udelay's etc... Instead, we wait -here-
	 * for the CPU to callin.
	 */
	for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) {
		for (j = 1; j < 10000; j++)
			asm volatile("nop" : : : "memory");
		asm volatile("sync" : : : "memory");
	}
	if (!cpu_callin_map[nr])
		goto stuck;

	/* And we do the TB sync here too for standard dual CPU cards */
	if (psurge_type == PSURGE_DUAL) {
		/* Wait for the secondary to request the TB, publish our
		 * value, then wait for it to be consumed (secondary clears
		 * 'timebase'); pairs with smp_psurge_take_timebase(). */
		while(!tb_req)
			barrier();
		tb_req = 0;
		mb();
		timebase = get_tb();
		mb();
		while (timebase)
			barrier();
		mb();
	}
 stuck:
	/* now interrupt the secondary, restarting both TBs */
	if (psurge_type == PSURGE_DUAL)
		psurge_set_ipi(1);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);

	return 0;
}
398*4882a593Smuzhiyun
smp_psurge_setup_cpu(int cpu_nr)399*4882a593Smuzhiyun static void __init smp_psurge_setup_cpu(int cpu_nr)
400*4882a593Smuzhiyun {
401*4882a593Smuzhiyun unsigned long flags = IRQF_PERCPU | IRQF_NO_THREAD;
402*4882a593Smuzhiyun int irq;
403*4882a593Smuzhiyun
404*4882a593Smuzhiyun if (cpu_nr != 0 || !psurge_start)
405*4882a593Smuzhiyun return;
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun /* reset the entry point so if we get another intr we won't
408*4882a593Smuzhiyun * try to startup again */
409*4882a593Smuzhiyun out_be32(psurge_start, 0x100);
410*4882a593Smuzhiyun irq = irq_create_mapping(NULL, 30);
411*4882a593Smuzhiyun if (request_irq(irq, psurge_ipi_intr, flags, "primary IPI", NULL))
412*4882a593Smuzhiyun printk(KERN_ERR "Couldn't get primary IPI interrupt");
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun
/*
 * Secondary side of the dual-CPU timebase handoff: request the frozen
 * TB value from the primary (which is spinning in smp_psurge_kick_cpu),
 * install it, then arm the decrementer.  No-op on quad boards, which
 * use the generic software sync instead.
 */
void __init smp_psurge_take_timebase(void)
{
	if (psurge_type != PSURGE_DUAL)
		return;

	tb_req = 1;
	mb();
	while (!timebase)
		barrier();
	mb();
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;		/* signal the primary that we consumed it */
	mb();
	set_dec(tb_ticks_per_jiffy/2);
}
430*4882a593Smuzhiyun
/*
 * Nothing to do here: on dual cards the give side of the TB handoff
 * already ran inside smp_psurge_kick_cpu().
 */
void __init smp_psurge_give_timebase(void)
{
	/* Nothing to do here */
}
435*4882a593Smuzhiyun
436*4882a593Smuzhiyun /* PowerSurge-style Macs */
/* smp_ops for PowerSurge-style Macs (old-world dual/quad boards). */
struct smp_ops_t psurge_smp_ops = {
	.message_pass	= NULL,	/* Use smp_muxed_ipi_message_pass */
	.cause_ipi	= smp_psurge_cause_ipi,
	.cause_nmi_ipi	= NULL,
	.probe		= smp_psurge_probe,
	.kick_cpu	= smp_psurge_kick_cpu,
	.setup_cpu	= smp_psurge_setup_cpu,
	.give_timebase	= smp_psurge_give_timebase,
	.take_timebase	= smp_psurge_take_timebase,
};
447*4882a593Smuzhiyun #endif /* CONFIG_PPC_PMAC32_PSURGE */
448*4882a593Smuzhiyun
449*4882a593Smuzhiyun /*
450*4882a593Smuzhiyun * Core 99 and later support
451*4882a593Smuzhiyun */
452*4882a593Smuzhiyun
453*4882a593Smuzhiyun
/*
 * Primary side of the Core99 timebase sync: wait for a secondary to
 * request the TB (tb_req), freeze the timebase via the platform hook,
 * publish our TB value, wait for the secondary to consume it (it
 * clears 'timebase'), then unfreeze.  Interrupts stay off for the
 * whole exchange.
 */
static void smp_core99_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	while(!tb_req)
		barrier();
	tb_req = 0;
	(*pmac_tb_freeze)(1);
	mb();
	timebase = get_tb();
	mb();
	while (timebase)
		barrier();
	mb();
	(*pmac_tb_freeze)(0);
	mb();

	local_irq_restore(flags);
}
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun
/*
 * Secondary side of the Core99 timebase sync: signal the primary via
 * tb_req, spin until it publishes its TB value, install it, and clear
 * 'timebase' to acknowledge.  Pairs with smp_core99_give_timebase().
 */
static void smp_core99_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	tb_req = 1;
	mb();
	while (!timebase)
		barrier();
	mb();
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;		/* ack: tells the primary we're done */
	mb();

	local_irq_restore(flags);
}
494*4882a593Smuzhiyun
#ifdef CONFIG_PPC64
/*
 * G5s enable/disable the timebase via an i2c-connected clock chip.
 */
static struct pmac_i2c_bus *pmac_tb_clock_chip_host; /* bus hosting the TB clock chip */
static u8 pmac_tb_pulsar_addr;	/* i2c address of a Pulsar chip (0xd2 or 0xd4) */
501*4882a593Smuzhiyun
smp_core99_cypress_tb_freeze(int freeze)502*4882a593Smuzhiyun static void smp_core99_cypress_tb_freeze(int freeze)
503*4882a593Smuzhiyun {
504*4882a593Smuzhiyun u8 data;
505*4882a593Smuzhiyun int rc;
506*4882a593Smuzhiyun
507*4882a593Smuzhiyun /* Strangely, the device-tree says address is 0xd2, but darwin
508*4882a593Smuzhiyun * accesses 0xd0 ...
509*4882a593Smuzhiyun */
510*4882a593Smuzhiyun pmac_i2c_setmode(pmac_tb_clock_chip_host,
511*4882a593Smuzhiyun pmac_i2c_mode_combined);
512*4882a593Smuzhiyun rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
513*4882a593Smuzhiyun 0xd0 | pmac_i2c_read,
514*4882a593Smuzhiyun 1, 0x81, &data, 1);
515*4882a593Smuzhiyun if (rc != 0)
516*4882a593Smuzhiyun goto bail;
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
519*4882a593Smuzhiyun
520*4882a593Smuzhiyun pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
521*4882a593Smuzhiyun rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
522*4882a593Smuzhiyun 0xd0 | pmac_i2c_write,
523*4882a593Smuzhiyun 1, 0x81, &data, 1);
524*4882a593Smuzhiyun
525*4882a593Smuzhiyun bail:
526*4882a593Smuzhiyun if (rc != 0) {
527*4882a593Smuzhiyun printk("Cypress Timebase %s rc: %d\n",
528*4882a593Smuzhiyun freeze ? "freeze" : "unfreeze", rc);
529*4882a593Smuzhiyun panic("Timebase freeze failed !\n");
530*4882a593Smuzhiyun }
531*4882a593Smuzhiyun }
532*4882a593Smuzhiyun
533*4882a593Smuzhiyun
/*
 * Freeze/unfreeze the timebase on G5s with a Pulsar clock chip (at
 * pmac_tb_pulsar_addr): read register 0x2e, rewrite the control bits
 * (0x11 = freeze, 0x22 = unfreeze, keeping the 0x88 bits), write it
 * back.  Panics on i2c failure for the same reason as the Cypress
 * variant.
 */
static void smp_core99_pulsar_tb_freeze(int freeze)
{
	u8 data;
	int rc;

	pmac_i2c_setmode(pmac_tb_clock_chip_host,
			 pmac_i2c_mode_combined);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   pmac_tb_pulsar_addr | pmac_i2c_read,
			   1, 0x2e, &data, 1);
	if (rc != 0)
		goto bail;

	data = (data & 0x88) | (freeze ? 0x11 : 0x22);

	pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   pmac_tb_pulsar_addr | pmac_i2c_write,
			   1, 0x2e, &data, 1);
 bail:
	if (rc != 0) {
		printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
		       freeze ? "freeze" : "unfreeze", rc);
		panic("Timebase freeze failed !\n");
	}
}
560*4882a593Smuzhiyun
smp_core99_setup_i2c_hwsync(int ncpus)561*4882a593Smuzhiyun static void __init smp_core99_setup_i2c_hwsync(int ncpus)
562*4882a593Smuzhiyun {
563*4882a593Smuzhiyun struct device_node *cc = NULL;
564*4882a593Smuzhiyun struct device_node *p;
565*4882a593Smuzhiyun const char *name = NULL;
566*4882a593Smuzhiyun const u32 *reg;
567*4882a593Smuzhiyun int ok;
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun /* Look for the clock chip */
570*4882a593Smuzhiyun for_each_node_by_name(cc, "i2c-hwclock") {
571*4882a593Smuzhiyun p = of_get_parent(cc);
572*4882a593Smuzhiyun ok = p && of_device_is_compatible(p, "uni-n-i2c");
573*4882a593Smuzhiyun of_node_put(p);
574*4882a593Smuzhiyun if (!ok)
575*4882a593Smuzhiyun continue;
576*4882a593Smuzhiyun
577*4882a593Smuzhiyun pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc);
578*4882a593Smuzhiyun if (pmac_tb_clock_chip_host == NULL)
579*4882a593Smuzhiyun continue;
580*4882a593Smuzhiyun reg = of_get_property(cc, "reg", NULL);
581*4882a593Smuzhiyun if (reg == NULL)
582*4882a593Smuzhiyun continue;
583*4882a593Smuzhiyun switch (*reg) {
584*4882a593Smuzhiyun case 0xd2:
585*4882a593Smuzhiyun if (of_device_is_compatible(cc,"pulsar-legacy-slewing")) {
586*4882a593Smuzhiyun pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
587*4882a593Smuzhiyun pmac_tb_pulsar_addr = 0xd2;
588*4882a593Smuzhiyun name = "Pulsar";
589*4882a593Smuzhiyun } else if (of_device_is_compatible(cc, "cy28508")) {
590*4882a593Smuzhiyun pmac_tb_freeze = smp_core99_cypress_tb_freeze;
591*4882a593Smuzhiyun name = "Cypress";
592*4882a593Smuzhiyun }
593*4882a593Smuzhiyun break;
594*4882a593Smuzhiyun case 0xd4:
595*4882a593Smuzhiyun pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
596*4882a593Smuzhiyun pmac_tb_pulsar_addr = 0xd4;
597*4882a593Smuzhiyun name = "Pulsar";
598*4882a593Smuzhiyun break;
599*4882a593Smuzhiyun }
600*4882a593Smuzhiyun if (pmac_tb_freeze != NULL)
601*4882a593Smuzhiyun break;
602*4882a593Smuzhiyun }
603*4882a593Smuzhiyun if (pmac_tb_freeze != NULL) {
604*4882a593Smuzhiyun /* Open i2c bus for synchronous access */
605*4882a593Smuzhiyun if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) {
606*4882a593Smuzhiyun printk(KERN_ERR "Failed top open i2c bus for clock"
607*4882a593Smuzhiyun " sync, fallback to software sync !\n");
608*4882a593Smuzhiyun goto no_i2c_sync;
609*4882a593Smuzhiyun }
610*4882a593Smuzhiyun printk(KERN_INFO "Processor timebase sync using %s i2c clock\n",
611*4882a593Smuzhiyun name);
612*4882a593Smuzhiyun return;
613*4882a593Smuzhiyun }
614*4882a593Smuzhiyun no_i2c_sync:
615*4882a593Smuzhiyun pmac_tb_freeze = NULL;
616*4882a593Smuzhiyun pmac_tb_clock_chip_host = NULL;
617*4882a593Smuzhiyun }
618*4882a593Smuzhiyun
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun
621*4882a593Smuzhiyun /*
622*4882a593Smuzhiyun * Newer G5s uses a platform function
623*4882a593Smuzhiyun */
624*4882a593Smuzhiyun
/*
 * Freeze/unfreeze the timebase on newer G5s via the "cpu-timebase"
 * platform function on the /cpus node (argument is 1 to run the TB,
 * 0 to stop it, hence the !freeze).
 */
static void smp_core99_pfunc_tb_freeze(int freeze)
{
	struct device_node *cpus;
	struct pmf_args args;

	cpus = of_find_node_by_path("/cpus");
	BUG_ON(cpus == NULL);
	args.count = 1;
	args.u[0].v = !freeze;
	pmf_call_function(cpus, "cpu-timebase", &args);
	of_node_put(cpus);
}
637*4882a593Smuzhiyun
#else /* CONFIG_PPC64 */

/*
 * SMP G4 use a GPIO to enable/disable the timebase.
 */

static unsigned int core99_tb_gpio;	/* Timebase freeze GPIO */
smp_core99_gpio_tb_freeze(int freeze)646*4882a593Smuzhiyun static void smp_core99_gpio_tb_freeze(int freeze)
647*4882a593Smuzhiyun {
648*4882a593Smuzhiyun if (freeze)
649*4882a593Smuzhiyun pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
650*4882a593Smuzhiyun else
651*4882a593Smuzhiyun pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
652*4882a593Smuzhiyun pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
653*4882a593Smuzhiyun }
654*4882a593Smuzhiyun
655*4882a593Smuzhiyun
656*4882a593Smuzhiyun #endif /* !CONFIG_PPC64 */
657*4882a593Smuzhiyun
/*
 * Propagate the boot CPU's L2/L3 cache controller settings to secondary
 * CPUs on 32-bit G4 machines.  CPU 0 records its L2CR/L3CR values; every
 * other CPU first disables the cache (writes 0) and then applies the
 * recorded value.  Compiled out (no-op) on 64-bit builds.
 */
static void core99_init_caches(int cpu)
{
#ifndef CONFIG_PPC64
	/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
	static long int core99_l2_cache;
	static long int core99_l3_cache;

	/* Nothing to do on CPUs without an L2CR register */
	if (!cpu_has_feature(CPU_FTR_L2CR))
		return;

	if (cpu == 0) {
		core99_l2_cache = _get_L2CR();
		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
	} else {
		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
		/* Disable the L2 before loading CPU0's configuration */
		_set_L2CR(0);
		_set_L2CR(core99_l2_cache);
		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
	}

	/* Some G4s also have an L3; same treatment if present */
	if (!cpu_has_feature(CPU_FTR_L3CR))
		return;

	if (cpu == 0){
		core99_l3_cache = _get_L3CR();
		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
	} else {
		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
		/* Disable the L3 before loading CPU0's configuration */
		_set_L3CR(0);
		_set_L3CR(core99_l3_cache);
		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
	}
#endif /* !CONFIG_PPC64 */
}
692*4882a593Smuzhiyun
/*
 * Early SMP setup: pick a timebase synchronization method (i2c hwsync or
 * platform function on G5s, GPIO on ppc32 Core99, generic software sync
 * as fallback), assign hard CPU ids on ppc32, and disable NAP where
 * 32-bit SMP cannot use it.
 */
static void __init smp_core99_setup(int ncpus)
{
#ifdef CONFIG_PPC64

	/* i2c based HW sync on some G5s */
	if (of_machine_is_compatible("PowerMac7,2") ||
	    of_machine_is_compatible("PowerMac7,3") ||
	    of_machine_is_compatible("RackMac3,1"))
		smp_core99_setup_i2c_hwsync(ncpus);

	/* pfunc based HW sync on recent G5s */
	if (pmac_tb_freeze == NULL) {
		/* NOTE(review): the "cpus" node reference is never put;
		 * harmless at boot time but worth confirming. */
		struct device_node *cpus =
			of_find_node_by_path("/cpus");
		if (cpus &&
		    of_get_property(cpus, "platform-cpu-timebase", NULL)) {
			pmac_tb_freeze = smp_core99_pfunc_tb_freeze;
			printk(KERN_INFO "Processor timebase sync using"
			       " platform function\n");
		}
	}

#else /* CONFIG_PPC64 */

	/* GPIO based HW sync on ppc32 Core99 */
	if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) {
		struct device_node *cpu;
		const u32 *tbprop = NULL;

		core99_tb_gpio = KL_GPIO_TB_ENABLE;	/* default value */
		cpu = of_find_node_by_type(NULL, "cpu");
		if (cpu != NULL) {
			/* Device-tree may override the default GPIO number */
			tbprop = of_get_property(cpu, "timebase-enable", NULL);
			if (tbprop)
				core99_tb_gpio = *tbprop;
			of_node_put(cpu);
		}
		pmac_tb_freeze = smp_core99_gpio_tb_freeze;
		printk(KERN_INFO "Processor timebase sync using"
		       " GPIO 0x%02x\n", core99_tb_gpio);
	}

#endif /* CONFIG_PPC64 */

	/* No timebase sync, fallback to software */
	if (pmac_tb_freeze == NULL) {
		smp_ops->give_timebase = smp_generic_give_timebase;
		smp_ops->take_timebase = smp_generic_take_timebase;
		printk(KERN_INFO "Processor timebase sync using software\n");
	}

#ifndef CONFIG_PPC64
	{
		int i;

		/* XXX should get this from reg properties */
		for (i = 1; i < ncpus; ++i)
			set_hard_smp_processor_id(i, i);
	}
#endif

	/* 32 bits SMP can't NAP */
	if (!of_machine_is_compatible("MacRISC4"))
		powersave_nap = 0;
}
758*4882a593Smuzhiyun
/*
 * SMP probe entry point: count "cpu" nodes in the device-tree and, when
 * more than one is found, run the early initialisations needed before
 * bringup (pfunc base, i2c, timebase-sync selection, IPIs, and a snapshot
 * of CPU0's L2/L3 cache settings).
 */
static void __init smp_core99_probe(void)
{
	struct device_node *cpus;
	int ncpus = 0;

	if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);

	/* Count CPUs in the device-tree */
	for_each_node_by_type(cpus, "cpu")
		++ncpus;

	printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);

	/* Nothing more to do if less than 2 of them */
	if (ncpus <= 1)
		return;

	/* We need to perform some early initialisations before we can start
	 * setting up SMP as we are running before initcalls
	 */
	pmac_pfunc_base_install();
	pmac_i2c_init();

	/* Setup various bits like timebase sync method, ability to nap, ... */
	smp_core99_setup(ncpus);

	/* Install IPIs */
	mpic_request_ipis();

	/* Collect l2cr and l3cr values from CPU 0 */
	core99_init_caches(0);
}
791*4882a593Smuzhiyun
/*
 * Start secondary CPU @nr: with interrupts off, temporarily patch the
 * 0x100 (system reset) exception vector with a branch into
 * __secondary_start_pmac_0 + nr*8, pulse the CPU's reset line through
 * the platform feature call, wait for it to take the exception, then
 * restore the original vector.
 *
 * Returns 0 on success, -ENOENT for an out-of-range CPU number.
 */
static int smp_core99_kick_cpu(int nr)
{
	unsigned int save_vector;
	unsigned long target, flags;
	unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100);

	/* Only CPUs 0-3 exist on these machines */
	if (nr < 0 || nr > 3)
		return -ENOENT;

	if (ppc_md.progress)
		ppc_md.progress("smp_core99_kick_cpu", 0x346);

	local_irq_save(flags);

	/* Save reset vector */
	save_vector = *vector;

	/* Setup fake reset vector that does
	 *   b __secondary_start_pmac_0 + nr*8
	 */
	target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
	patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK);

	/* Put some life in our friend */
	pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);

	/* FIXME: We wait a bit for the CPU to take the exception, I should
	 * instead wait for the entry code to set something for me. Well,
	 * ideally, all that crap will be done in prom.c and the CPU left
	 * in a RAM-based wait loop like CHRP.
	 */
	mdelay(1);

	/* Restore our exception vector */
	patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector));

	local_irq_restore(flags);
	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);

	return 0;
}
833*4882a593Smuzhiyun
/*
 * Per-CPU setup: secondaries inherit CPU0's L2/L3 cache settings, then
 * every CPU initialises its view of the openpic interrupt controller.
 */
static void smp_core99_setup_cpu(int cpu_nr)
{
	/* CPU0's cache values were already captured in smp_core99_probe() */
	if (cpu_nr)
		core99_init_caches(cpu_nr);

	/* Setup openpic */
	mpic_setup_this_cpu();
}
843*4882a593Smuzhiyun
844*4882a593Smuzhiyun #ifdef CONFIG_PPC64
845*4882a593Smuzhiyun #ifdef CONFIG_HOTPLUG_CPU
846*4882a593Smuzhiyun static unsigned int smp_core99_host_open;
847*4882a593Smuzhiyun
/*
 * CPU hotplug "prepare" callback: re-open the i2c bus used for timebase
 * sync (it is closed again once the CPU is online, and after boot-time
 * bringup completes).
 */
static int smp_core99_cpu_prepare(unsigned int cpu)
{
	int rc;

	/* Open i2c bus if it was used for tb sync */
	if (pmac_tb_clock_chip_host && !smp_core99_host_open) {
		rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
		if (rc) {
			pr_err("Failed to open i2c bus for time sync\n");
			/* NOTE(review): cpuhp callbacks normally return a
			 * plain negative errno; notifier_from_errno() looks
			 * like a leftover from the old notifier API -- confirm
			 * the encoded value is handled as intended. */
			return notifier_from_errno(rc);
		}
		smp_core99_host_open = 1;
	}
	return 0;
}
863*4882a593Smuzhiyun
smp_core99_cpu_online(unsigned int cpu)864*4882a593Smuzhiyun static int smp_core99_cpu_online(unsigned int cpu)
865*4882a593Smuzhiyun {
866*4882a593Smuzhiyun /* Close i2c bus if it was used for tb sync */
867*4882a593Smuzhiyun if (pmac_tb_clock_chip_host && smp_core99_host_open) {
868*4882a593Smuzhiyun pmac_i2c_close(pmac_tb_clock_chip_host);
869*4882a593Smuzhiyun smp_core99_host_open = 0;
870*4882a593Smuzhiyun }
871*4882a593Smuzhiyun return 0;
872*4882a593Smuzhiyun }
873*4882a593Smuzhiyun #endif /* CONFIG_HOTPLUG_CPU */
874*4882a593Smuzhiyun
/*
 * Called once boot-time bringup is complete: release the timebase-sync
 * i2c bus, take CPU1 off the bus on G5s if it never came online, and
 * register the hotplug callbacks that reopen/close the i2c bus later.
 */
static void __init smp_core99_bringup_done(void)
{
	extern void g5_phy_disable_cpu1(void);

	/* Close i2c bus if it was used for tb sync */
	if (pmac_tb_clock_chip_host)
		pmac_i2c_close(pmac_tb_clock_chip_host);

	/* If we didn't start the second CPU, we must take
	 * it off the bus.
	 */
	if (of_machine_is_compatible("MacRISC4") &&
	    num_online_cpus() < 2) {
		set_cpu_present(1, false);
		g5_phy_disable_cpu1();
	}
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_POWERPC_PMAC_PREPARE,
				  "powerpc/pmac:prepare", smp_core99_cpu_prepare,
				  NULL);
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "powerpc/pmac:online",
				  smp_core99_cpu_online, NULL);
#endif

	if (ppc_md.progress)
		ppc_md.progress("smp_core99_bringup_done", 0x349);
}
902*4882a593Smuzhiyun #endif /* CONFIG_PPC64 */
903*4882a593Smuzhiyun
904*4882a593Smuzhiyun #ifdef CONFIG_HOTPLUG_CPU
905*4882a593Smuzhiyun
/*
 * Take the current CPU out of service for hotplug: run the generic
 * disable path, then mask all interrupts on this CPU by raising the
 * MPIC task priority and drop its MMU context references.
 */
static int smp_core99_cpu_disable(void)
{
	int rc = generic_cpu_disable();
	if (rc)
		return rc;

	/* Priority 0xf masks every interrupt source on this CPU */
	mpic_cpu_set_priority(0xf);

	cleanup_cpu_mmu_context();

	return 0;
}
918*4882a593Smuzhiyun
919*4882a593Smuzhiyun #ifdef CONFIG_PPC32
920*4882a593Smuzhiyun
/*
 * 32-bit offline path: mark this CPU dead and hand off to the low-level
 * assembly offline loop.  Runs on the dying CPU with interrupts off.
 */
static void pmac_cpu_offline_self(void)
{
	int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();
	pr_debug("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	/* Publish the dead flag before stopping; the smp_wmb() appears
	 * redundant with the full mb() that follows -- kept as-is. */
	smp_wmb();
	mb();
	low_cpu_offline_self();
}
933*4882a593Smuzhiyun
934*4882a593Smuzhiyun #else /* CONFIG_PPC32 */
935*4882a593Smuzhiyun
/*
 * 64-bit offline path: mark this CPU dead, then spin forever in NAP
 * mode with a long decrementer period.  The CPU is "kicked out" of this
 * loop by a reset when it is brought back online.
 */
static void pmac_cpu_offline_self(void)
{
	int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * turn off as much as possible, we'll be
	 * kicked out as this will only be invoked
	 * on core99 platforms for now ...
	 */

	printk(KERN_INFO "CPU#%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();

	/*
	 * Re-enable interrupts. The NAP code needs to enable them
	 * anyways, do it now so we deal with the case where one already
	 * happened while soft-disabled.
	 * We shouldn't get any external interrupts, only decrementer, and the
	 * decrementer handler is safe for use on offline CPUs
	 */
	local_irq_enable();

	while (1) {
		/* let's not take timer interrupts too often ... */
		set_dec(0x7fffffff);

		/* Enter NAP mode */
		power4_idle();
	}
}
970*4882a593Smuzhiyun
971*4882a593Smuzhiyun #endif /* else CONFIG_PPC32 */
972*4882a593Smuzhiyun #endif /* CONFIG_HOTPLUG_CPU */
973*4882a593Smuzhiyun
974*4882a593Smuzhiyun /* Core99 Macs (dual G4s and G5s) */
975*4882a593Smuzhiyun static struct smp_ops_t core99_smp_ops = {
976*4882a593Smuzhiyun .message_pass = smp_mpic_message_pass,
977*4882a593Smuzhiyun .probe = smp_core99_probe,
978*4882a593Smuzhiyun #ifdef CONFIG_PPC64
979*4882a593Smuzhiyun .bringup_done = smp_core99_bringup_done,
980*4882a593Smuzhiyun #endif
981*4882a593Smuzhiyun .kick_cpu = smp_core99_kick_cpu,
982*4882a593Smuzhiyun .setup_cpu = smp_core99_setup_cpu,
983*4882a593Smuzhiyun .give_timebase = smp_core99_give_timebase,
984*4882a593Smuzhiyun .take_timebase = smp_core99_take_timebase,
985*4882a593Smuzhiyun #if defined(CONFIG_HOTPLUG_CPU)
986*4882a593Smuzhiyun .cpu_disable = smp_core99_cpu_disable,
987*4882a593Smuzhiyun .cpu_die = generic_cpu_die,
988*4882a593Smuzhiyun #endif
989*4882a593Smuzhiyun };
990*4882a593Smuzhiyun
/*
 * Detect this PowerMac's SMP flavour and install the matching smp_ops:
 * Core99 (machines with a uni-n, u3 or u4 bridge node) or, failing
 * that, the old PowerSurge architecture when CONFIG_PPC_PMAC32_PSURGE
 * is enabled.
 */
void __init pmac_setup_smp(void)
{
	struct device_node *np;

	/* Check for Core99 */
	np = of_find_node_by_name(NULL, "uni-n");
	if (!np)
		np = of_find_node_by_name(NULL, "u3");
	if (!np)
		np = of_find_node_by_name(NULL, "u4");
	if (np) {
		of_node_put(np);
		smp_ops = &core99_smp_ops;
	}
#ifdef CONFIG_PPC_PMAC32_PSURGE
	else {
		/* We have to set bits in cpu_possible_mask here since the
		 * secondary CPU(s) aren't in the device tree. Various
		 * things won't be initialized for CPUs not in the possible
		 * map, so we really need to fix it up here.
		 */
		int cpu;

		/* PowerSurge boards carry at most 4 CPUs */
		for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
			set_cpu_possible(cpu, true);
		smp_ops = &psurge_smp_ops;
	}
#endif /* CONFIG_PPC_PMAC32_PSURGE */

#ifdef CONFIG_HOTPLUG_CPU
	/* NOTE(review): this assumes one of the branches above always set
	 * smp_ops; presumably every SMP-capable PowerMac matches -- confirm
	 * there is no configuration where smp_ops is still NULL here. */
	smp_ops->cpu_offline_self = pmac_cpu_offline_self;
#endif
}
1024*4882a593Smuzhiyun
1025*4882a593Smuzhiyun
1026