/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kexec.h>

#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

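/*
 * Boot handshake area polled by secondary cores in the low-level boot
 * code: octeon_boot_secondary() publishes the target core id, stack
 * pointer and global pointer here, and the secondary apparently clears
 * octeon_processor_sp once it has picked them up (see the wait loop in
 * octeon_boot_secondary()).
 */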
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;
#ifdef CONFIG_RELOCATABLE
volatile unsigned long octeon_processor_relocated_kernel_entry;
#endif /* CONFIG_RELOCATABLE */

#ifdef CONFIG_HOTPLUG_CPU
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

extern void kernel_entry(unsigned long arg1, ...);

static void octeon_icache_flush(void)
{
	asm volatile ("synci 0($0)\n");
}

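/*
 * IPI handlers indexed by mailbox bit number; the order must match the
 * SMP_* action bits, which the BUILD_BUG_ON() checks in
 * mailbox_interrupt() verify.
 */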
static void (*octeon_message_functions[8])(void) = {
	scheduler_ipi,
	generic_smp_call_function_interrupt,
	octeon_icache_flush,
};

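/*
 * Mailbox IRQ handler: read the pending action bits from this core's
 * mailbox, acknowledge them by writing them back to the CLR register,
 * then dispatch the handler for each bit that is set.
 */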
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
	u64 action;
	int i;

	/*
	 * Make sure the function array initialization remains
	 * correct.
	 */
	BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
	BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
	BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));

	/*
	 * Load the mailbox register to figure out what we're supposed
	 * to do.
	 */
	action = cvmx_read_csr(mbox_clrx);

	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		action &= 0xff;
	else
		action &= 0xffff;

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(mbox_clrx, action);

	for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
		if (action & 1) {
			void (*fn)(void) = octeon_message_functions[i];

			if (fn)
				fn();
		}
		action >>= 1;
		i++;
	}
	return IRQ_HANDLED;
}

/**
 * Send an IPI to the given CPU: the action bits are written into the
 * mailbox SET register of the target core, which raises the mailbox
 * interrupt handled by mailbox_interrupt() above.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);
	/*
	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
		coreid, action);
	*/
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		octeon_send_ipi_single(i, action);
}

/**
 * Prepare for CPU hotplug: record the bootloader entry address from
 * the linux_app_boot_info block so offlined cores can be restarted
 * later; bail out if the bootloader does not support HOTPLUG_CPU.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE) {
		pr_info("The bootloader on this board does not support HOTPLUG_CPU.\n");
		return;
	}

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

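/*
 * Detect the cores present on the chip and populate cpu_possible_mask
 * and cpu_present_mask. The boot core always becomes CPU 0; the other
 * present cores get the lowest CPU numbers, and (with HOTPLUG_CPU) the
 * not-yet-booted cores are appended as possible CPUs.
 */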
static void __init octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	int core_mask = octeon_get_boot_coremask();
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip. We
	 * will assign CPU numbers for possible cores as well. Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}

#ifdef CONFIG_RELOCATABLE
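/*
 * With a relocatable kernel, tell waiting secondary cores where the
 * relocated kernel_entry lives so they jump into the moved image.
 */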
int plat_post_relocation(long offset)
{
	unsigned long entry = (unsigned long)kernel_entry;

	/* Send secondaries into relocated kernel */
	octeon_processor_relocated_kernel_entry = entry + offset;

	return 0;
}
#endif /* CONFIG_RELOCATABLE */

/**
 * Firmware CPU startup hook: hand the idle thread's stack and global
 * pointers plus the target core id to the waiting secondary, then
 * poll until it has picked them up.
 */
static int octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int count;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();

	count = 10000;
	while (octeon_processor_sp && count) {
		/* Waiting for processor to get the SP and GP */
		udelay(1);
		count--;
	}
	if (count == 0) {
		pr_err("Secondary boot timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed.
 */
static void octeon_init_secondary(void)
{
	unsigned int sr;

	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
}

/**
 * Callout to firmware before smp_init
 */
static void __init octeon_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
	}
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
static DEFINE_PER_CPU(int, cpu_state);

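/*
 * Take this CPU out of the online set. CPU 0 cannot be offlined, and
 * hotplug only works when the bootloader advertised support via the
 * LABI block (octeon_bootloader_entry_addr is then non-zero).
 */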
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	octeon_fixup_irqs();

	__flush_cache_all();
	local_flush_tlb_all();

	return 0;
}

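/*
 * Runs on a surviving CPU: wait for the dying CPU to reach CPU_DEAD,
 * return its core to the bootloader's available coremask, then pulse
 * the core's reset line.
 */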
static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a somewhat convoluted strategy for getting/setting
	 * the available coremask, copied from the bootloader.
	 */

	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {		/* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
							       AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x\n", coreid, new_mask);
	mb();
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

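/*
 * Runs on the dying CPU itself: mark this CPU dead and spin until
 * octeon_cpu_die() resets the core.
 */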
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}

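/*
 * Entry point handed to the bootloader for a core being brought back:
 * re-enters the kernel through kernel_entry with zeroed arguments.
 */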
static void start_after_reset(void)
{
	kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */
}

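/*
 * CPU-hotplug "prepare" step: point the bootloader's boot vector for
 * this core at start_after_reset, reset the core first if the
 * bootloader no longer owns it, then NMI it out of the bootloader's
 * wait loop.
 */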
static int octeon_update_boot_vector(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {		/* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available; assume it is held by the simple executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	mb();

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}

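/* Run octeon_update_boot_vector() as a CPU hotplug "prepare" callback. */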
static int register_cavium_notifier(void)
{
	return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
					 "mips/cavium:prepare",
					 octeon_update_boot_vector, NULL);
}
late_initcall(register_cavium_notifier);

#endif /* CONFIG_HOTPLUG_CPU */

static const struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single = octeon_send_ipi_single,
	.send_ipi_mask = octeon_send_ipi_mask,
	.init_secondary = octeon_init_secondary,
	.smp_finish = octeon_smp_finish,
	.boot_secondary = octeon_boot_secondary,
	.smp_setup = octeon_smp_setup,
	.prepare_cpus = octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = octeon_cpu_disable,
	.cpu_die = octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};

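/*
 * On CIU3-based chips each IPI type gets its own mailbox interrupt, so
 * reschedule, call-function and icache-flush have dedicated handlers
 * instead of the shared mailbox dispatcher above.
 */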
static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
{
	octeon_icache_flush();
	return IRQ_HANDLED;
}

/*
 * Callout to firmware before smp_init
 */
static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
{
	if (request_irq(OCTEON_IRQ_MBOX0 + 0,
			octeon_78xx_reched_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
			octeon_78xx_reched_interrupt)) {
		panic("Cannot request_irq for SchedulerIPI");
	}
	if (request_irq(OCTEON_IRQ_MBOX0 + 1,
			octeon_78xx_call_function_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
			octeon_78xx_call_function_interrupt)) {
		panic("Cannot request_irq for SMP-Call");
	}
	if (request_irq(OCTEON_IRQ_MBOX0 + 2,
			octeon_78xx_icache_flush_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
			octeon_78xx_icache_flush_interrupt)) {
		panic("Cannot request_irq for ICache-Flush");
	}
}

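/*
 * Each set bit in @action maps to its own CIU3 mailbox, so send one
 * mailbox message per pending IPI type.
 */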
static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (action & 1)
			octeon_ciu3_mbox_send(cpu, i);
		action >>= 1;
	}
}

static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
				      unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		octeon_78xx_send_ipi_single(cpu, action);
}

static const struct plat_smp_ops octeon_78xx_smp_ops = {
	.send_ipi_single = octeon_78xx_send_ipi_single,
	.send_ipi_mask = octeon_78xx_send_ipi_mask,
	.init_secondary = octeon_init_secondary,
	.smp_finish = octeon_smp_finish,
	.boot_secondary = octeon_boot_secondary,
	.smp_setup = octeon_smp_setup,
	.prepare_cpus = octeon_78xx_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = octeon_cpu_disable,
	.cpu_die = octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};

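/*
 * Pick the SMP ops that match the interrupt controller: chips with a
 * CIU3 (e.g. CN78XX) use the per-mailbox handlers, everything else
 * uses the shared-mailbox variant.
 */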
void __init octeon_setup_smp(void)
{
	const struct plat_smp_ops *ops;

	if (octeon_has_feature(OCTEON_FEATURE_CIU3))
		ops = &octeon_78xx_smp_ops;
	else
		ops = &octeon_smp_ops;

	register_smp_ops(ops);
}