// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include <asm/fadump.h>
#include <asm/udbg.h>
#include <asm/hugetlb.h>
#include <asm/livepatch.h>
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>

#include "setup.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* The main machine-dep calls structure */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;
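/*
 * Illustrative sketch (not part of this file): the ELF loader exposes
 * these through ARCH_DLINFO in <asm/elf.h> as auxiliary vector entries,
 * along the lines of:
 *
 *	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);
 *	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);
 *	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);
 *
 * so userspace (e.g. glibc) can size cache-block-aligned copy loops.
 */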

unsigned long klimit = (unsigned long) _end;

/*
 * This still seems to be needed... -- paulus
 */
struct screen_info screen_info = {
	.orig_x = 0,
	.orig_y = 25,
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};
#if defined(CONFIG_FB_VGA16_MODULE)
EXPORT_SYMBOL(screen_info);
#endif

/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
int of_i8042_aux_irq;
EXPORT_SYMBOL_GPL(of_i8042_aux_irq);

#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

#ifdef CONFIG_CRASH_CORE
/* This keeps track of which one is the crashing cpu. */
int crashing_cpu = -1;
#endif

/* also used by kexec */
void machine_shutdown(void)
{
	/*
	 * If fadump is active, clean up the fadump registration before we
	 * shut down.
	 */
	fadump_cleanup();

	if (ppc_md.machine_shutdown)
		ppc_md.machine_shutdown();
}

static void machine_hang(void)
{
	pr_emerg("System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1)
		;
}

void machine_restart(char *cmd)
{
	machine_shutdown();
	if (ppc_md.restart)
		ppc_md.restart(cmd);

	smp_send_stop();

	do_kernel_restart(cmd);
	mdelay(1000);

	machine_hang();
}

void machine_power_off(void)
{
	machine_shutdown();
	if (pm_power_off)
		pm_power_off();

	smp_send_stop();
	machine_hang();
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);

void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void machine_halt(void)
{
	machine_shutdown();
	if (ppc_md.halt)
		ppc_md.halt();

	smp_send_stop();
	machine_hang();
}

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif

static void show_cpuinfo_summary(struct seq_file *m)
{
	struct device_node *root;
	const char *model = NULL;
	unsigned long bogosum = 0;
	int i;

	if (IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_PPC32)) {
		for_each_online_cpu(i)
			bogosum += loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum / (500000 / HZ), bogosum / (5000 / HZ) % 100);
	}
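	/*
	 * Worked example with hypothetical numbers: HZ=100 and two online
	 * CPUs, each with loops_per_jiffy = 4980736, give bogosum = 9961472,
	 * so the integer part is 9961472 / 5000 = 1992 and the fraction is
	 * (9961472 / 50) % 100 = 29, printed as "total bogomips : 1992.29".
	 */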
	seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
	if (ppc_md.name)
		seq_printf(m, "platform\t: %s\n", ppc_md.name);
	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	if (model)
		seq_printf(m, "model\t\t: %s\n", model);
	of_node_put(root);

	if (ppc_md.show_cpuinfo != NULL)
		ppc_md.show_cpuinfo(m);

	/* Display the amount of memory */
	if (IS_ENABLED(CONFIG_PPC32))
		seq_printf(m, "Memory\t\t: %d MB\n",
			   (unsigned int)(total_memory / (1024 * 1024)));
}

static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	unsigned int pvr;
	unsigned long proc_freq;
	unsigned short maj;
	unsigned short min;

#ifdef CONFIG_SMP
	pvr = per_cpu(cpu_pvr, cpu_id);
#else
	pvr = mfspr(SPRN_PVR);
#endif
	maj = (pvr >> 8) & 0xFF;
	min = pvr & 0xFF;

	seq_printf(m, "processor\t: %lu\n", cpu_id);
	seq_printf(m, "cpu\t\t: ");

	if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
		seq_printf(m, "%s", cur_cpu_spec->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		seq_printf(m, ", altivec supported");

	seq_printf(m, "\n");

#ifdef CONFIG_TAU
	if (cpu_has_feature(CPU_FTR_TAU)) {
		if (IS_ENABLED(CONFIG_TAU_AVERAGE)) {
			/* more straightforward, but potentially misleading */
			seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
				   cpu_temp(cpu_id));
		} else {
			/* show the actual temp sensor range */
			u32 temp;
			temp = cpu_temp_both(cpu_id);
			seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
				   temp & 0xff, temp >> 16);
		}
	}
#endif /* CONFIG_TAU */

	/*
	 * Platforms with variable clock rates should implement
	 * ppc_md.get_proc_freq(), which reports the clock rate of a
	 * given cpu. The rest can use ppc_proc_freq to report the
	 * clock rate that is the same across all cpus.
	 */
	if (ppc_md.get_proc_freq)
		proc_freq = ppc_md.get_proc_freq(cpu_id);
	else
		proc_freq = ppc_proc_freq;

	if (proc_freq)
		seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
			   proc_freq / 1000000, proc_freq % 1000000);

	if (ppc_md.show_percpuinfo != NULL)
		ppc_md.show_percpuinfo(m, cpu_id);

	/*
	 * If we are a Freescale core, do a simple check so we don't
	 * have to keep adding cases in the future.
	 */
	if (PVR_VER(pvr) & 0x8000) {
		switch (PVR_VER(pvr)) {
		case 0x8000:	/* 7441/7450/7451, Voyager */
		case 0x8001:	/* 7445/7455, Apollo 6 */
		case 0x8002:	/* 7447/7457, Apollo 7 */
		case 0x8003:	/* 7447A, Apollo 7 PM */
		case 0x8004:	/* 7448, Apollo 8 */
		case 0x800c:	/* 7410, Nitro */
			maj = ((pvr >> 8) & 0xF);
			min = PVR_MIN(pvr);
			break;
		default:	/* e500/book-e */
			maj = PVR_MAJ(pvr);
			min = PVR_MIN(pvr);
			break;
		}
	} else {
		switch (PVR_VER(pvr)) {
		case 0x1008:	/* 740P/750P ?? */
			maj = ((pvr >> 8) & 0xFF) - 1;
			min = pvr & 0xFF;
			break;
		case 0x004e:	/* POWER9 bits 12-15 give chip type */
		case 0x0080:	/* POWER10 bit 12 gives SMT8/4 */
			maj = (pvr >> 8) & 0x0F;
			min = pvr & 0xFF;
			break;
		default:
			maj = (pvr >> 8) & 0xFF;
			min = pvr & 0xFF;
			break;
		}
	}
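	/*
	 * Worked example: a POWER9 DD2.2 part has pvr 0x004e1202, so
	 * PVR_VER(pvr) = 0x004e takes the case above and yields
	 * maj = (0x12 & 0x0F) = 2 and min = 0x02 = 2, printed below as
	 * "revision : 2.2 (pvr 004e 1202)".
	 */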

	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
		   maj, min, PVR_VER(pvr), PVR_REV(pvr));

	if (IS_ENABLED(CONFIG_PPC32))
		seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
			   (loops_per_jiffy / (5000 / HZ)) % 100);

	seq_printf(m, "\n");

	/* If this is the last cpu, print the summary */
	if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
		show_cpuinfo_summary(m);

	return 0;
}

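/*
 * Note on the iterator below: seq_file treats a NULL return from
 * ->start/->next as end-of-sequence, so cpu ids are handed out biased
 * by +1 (cpu 0 becomes the non-NULL cookie 1) and show_cpuinfo()
 * subtracts 1 again when decoding v.
 */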
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = cpumask_first(cpu_online_mask);
	else
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	if ((*pos) < nr_cpu_ids)
		return (void *)(unsigned long)(*pos + 1);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};

void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	DBG(" -> check_for_initrd()  initrd_start=0x%lx  initrd_end=0x%lx\n",
	    initrd_start, initrd_end);

	/*
	 * If we were passed an initrd, set the ROOT_DEV properly if the
	 * values look sensible. If not, clear the initrd reference.
	 */
	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_SMP

int threads_per_core, threads_per_subcore, threads_shift __read_mostly;
cpumask_t threads_core_mask __read_mostly;
EXPORT_SYMBOL_GPL(threads_per_core);
EXPORT_SYMBOL_GPL(threads_per_subcore);
EXPORT_SYMBOL_GPL(threads_shift);
EXPORT_SYMBOL_GPL(threads_core_mask);

static void __init cpu_init_thread_core_maps(int tpc)
{
	int i;

	threads_per_core = tpc;
	threads_per_subcore = tpc;
	cpumask_clear(&threads_core_mask);

	/*
	 * This implementation only supports a power-of-2 number of
	 * threads, for simplicity and performance.
	 */
	threads_shift = ilog2(tpc);
	BUG_ON(tpc != (1 << threads_shift));
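	/* E.g. tpc = 8 gives threads_shift = 3, since 1 << 3 == 8. */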

	for (i = 0; i < tpc; i++)
		cpumask_set_cpu(i, &threads_core_mask);

	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
	       tpc, tpc > 1 ? "s" : "");
	printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}


u32 *cpu_to_phys_id = NULL;

/**
 * smp_setup_cpu_maps - initialize the following cpu maps:
 *                      cpu_possible_mask
 *                      cpu_present_mask
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_mask as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
void __init smp_setup_cpu_maps(void)
{
	struct device_node *dn;
	int cpu = 0;
	int nthreads = 1;

	DBG("smp_setup_cpu_maps()\n");

	cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
					__alignof__(u32));
	if (!cpu_to_phys_id)
		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
		      __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));

	for_each_node_by_type(dn, "cpu") {
		const __be32 *intserv;
		__be32 cpu_be;
		int j, len;

		DBG("  * %pOF...\n", dn);

		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
					  &len);
		if (intserv) {
			DBG("    ibm,ppc-interrupt-server#s -> %d threads\n",
			    nthreads);
		} else {
			DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
			intserv = of_get_property(dn, "reg", &len);
			if (!intserv) {
				cpu_be = cpu_to_be32(cpu);
				/* XXX: what is this? uninitialized?? */
				intserv = &cpu_be;	/* assume logical == phys */
				len = 4;
			}
		}

		nthreads = len / sizeof(int);
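		/*
		 * Example (hypothetical values): a cpu node carrying
		 * ibm,ppc-interrupt-server#s = <0x20 0x21 0x22 0x23>
		 * comes back with len = 16, so nthreads = 4 and the
		 * loop below registers four logical cpus for this core.
		 */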

		for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
			bool avail;

			DBG("    thread %d -> cpu %d (hard id %d)\n",
			    j, cpu, be32_to_cpu(intserv[j]));

			avail = of_device_is_available(dn);
			if (!avail)
				avail = !of_property_match_string(dn,
						"enable-method", "spin-table");

			set_cpu_present(cpu, avail);
			set_cpu_possible(cpu, true);
			cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
			cpu++;
		}

		if (cpu >= nr_cpu_ids) {
			of_node_put(dn);
			break;
		}
	}

	/* If no SMT is supported, nthreads is forced to 1 */
	if (!cpu_has_feature(CPU_FTR_SMT)) {
		DBG("  SMT disabled! nthreads forced to 1\n");
		nthreads = 1;
	}

#ifdef CONFIG_PPC64
	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		const __be32 *ireg;

		num_addr_cell = of_n_addr_cells(dn);
		num_size_cell = of_n_size_cells(dn);

		ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);

		if (!ireg)
			goto out;

		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
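		/*
		 * ibm,lrdr-capacity is laid out (as this code assumes) as a
		 * maximum-memory address/size pair followed by the maximum
		 * number of processors, so the cpu count sits
		 * num_addr_cell + num_size_cell words into the property.
		 */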

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= nthreads;

		if (maxcpus > nr_cpu_ids) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %u.\n",
			       maxcpus, nr_cpu_ids);
			maxcpus = nr_cpu_ids;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			set_cpu_possible(cpu, true);
	out:
		of_node_put(dn);
	}
	vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */

	/*
	 * Initialize the CPU <=> thread mapping.
	 *
	 * WARNING: We assume that the number of threads is the same for
	 * every CPU in the system. If that is not the case, then some code
	 * here will have to be reworked.
	 */
	cpu_init_thread_core_maps(nthreads);

	/* Now that possible cpus are set, set nr_cpu_ids for later use */
	setup_nr_cpu_ids();

	free_unused_pacas();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PCSPKR_PLATFORM
static __init int add_pcspkr(void)
{
	struct device_node *np;
	struct platform_device *pd;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
	of_node_put(np);
	if (!np)
		return -ENODEV;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
#endif	/* CONFIG_PCSPKR_PLATFORM */

void probe_machine(void)
{
	extern struct machdep_calls __machine_desc_start;
	extern struct machdep_calls __machine_desc_end;
	unsigned int i;

	/*
	 * Iterate over all ppc_md structures until we find the proper
	 * one for the current machine type.
	 */
	DBG("Probing machine type ...\n");

	/*
	 * Check that ppc_md is empty. If not, we have a bug, i.e. we set up
	 * an entry before probe_machine() that will be overwritten here.
	 */
	for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) {
		if (((void **)&ppc_md)[i]) {
			printk(KERN_ERR "Entry %d in ppc_md non-empty before"
			       " machine probe!\n", i);
		}
	}

	for (machine_id = &__machine_desc_start;
	     machine_id < &__machine_desc_end;
	     machine_id++) {
		DBG("  %s ...", machine_id->name);
		memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
		if (ppc_md.probe()) {
			DBG(" match!\n");
			break;
		}
		DBG("\n");
	}
	/* What can we do if we didn't find anything? */
	if (machine_id >= &__machine_desc_end) {
		pr_err("No suitable machine description found!\n");
		for (;;);
	}

	printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
}
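
/*
 * Illustrative sketch: entries land in the machine-description section
 * walked above via the define_machine() macro in <asm/machdep.h>, which
 * platforms use roughly like this ("myboard" being a made-up example):
 *
 *	define_machine(myboard) {
 *		.name		= "MyBoard",
 *		.probe		= myboard_probe,
 *		.setup_arch	= myboard_setup_arch,
 *	};
 */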

/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
	struct device_node *parent, *np = NULL;
	int ret = -ENODEV;

	switch (base_port) {
	case I8042_DATA_REG:
		if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
			np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
		if (np) {
			parent = of_get_parent(np);

			of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0);
			if (!of_i8042_kbd_irq)
				of_i8042_kbd_irq = 1;

			of_i8042_aux_irq = irq_of_parse_and_map(parent, 1);
			if (!of_i8042_aux_irq)
				of_i8042_aux_irq = 12;

			of_node_put(np);
			np = parent;
			break;
		}
		np = of_find_node_by_type(NULL, "8042");
		/*
		 * Pegasos has no device_type on its 8042 node, look for
		 * the name instead.
		 */
		if (!np)
			np = of_find_node_by_name(NULL, "8042");
		if (np) {
			of_i8042_kbd_irq = 1;
			of_i8042_aux_irq = 12;
		}
		break;
	case FDC_BASE: /* FDC1 */
		np = of_find_node_by_type(NULL, "fdc");
		break;
	default:
		/* ipmi is supposed to fail here */
		break;
	}
	if (!np)
		return ret;
	parent = of_get_parent(np);
	if (parent) {
		if (of_node_is_type(parent, "isa"))
			ret = 0;
		of_node_put(parent);
	}
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);

static int ppc_panic_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	/*
	 * panic does a local_irq_disable, but we really
	 * want interrupts to be hard disabled.
	 */
	hard_irq_disable();

	/*
	 * If firmware-assisted dump has been registered then trigger
	 * firmware-assisted dump and let firmware handle everything else.
	 */
	crash_fadump(NULL, ptr);
	if (ppc_md.panic)
		ppc_md.panic(ptr);	/* May not return */
	return NOTIFY_DONE;
}

static struct notifier_block ppc_panic_block = {
	.notifier_call = ppc_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};

/*
 * Dump out kernel offset information on panic.
 */
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
			      void *p)
{
	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
		 kaslr_offset(), KERNELBASE);

	return 0;
}

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

void __init setup_panic(void)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kernel_offset_notifier);

	/* PPC64 always does a hard irq disable in its panic handler */
	if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
		return;
	atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}

#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency.  This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree.  Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */

#define KERNEL_COHERENCY	(!IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))

static int __init check_cache_coherency(void)
{
	struct device_node *np;
	const void *prop;
	bool devtree_coherency;

	np = of_find_node_by_path("/");
	prop = of_get_property(np, "coherency-off", NULL);
	of_node_put(np);

	devtree_coherency = prop ? false : true;

	if (devtree_coherency != KERNEL_COHERENCY) {
		printk(KERN_ERR
		       "kernel coherency:%s != device tree coherency:%s\n",
		       KERNEL_COHERENCY ? "on" : "off",
		       devtree_coherency ? "on" : "off");
		BUG();
	}

	return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */

#ifdef CONFIG_DEBUG_FS
struct dentry *powerpc_debugfs_root;
EXPORT_SYMBOL(powerpc_debugfs_root);

static int powerpc_debugfs_init(void)
{
	powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);
	return 0;
}
arch_initcall(powerpc_debugfs_init);
#endif

void ppc_printk_progress(char *s, unsigned short hex)
{
	pr_info("%s\n", s);
}

static __init void print_system_info(void)
{
	pr_info("-----------------------------------------------------\n");
	pr_info("phys_mem_size     = 0x%llx\n",
		(unsigned long long)memblock_phys_mem_size());

	pr_info("dcache_bsize      = 0x%x\n", dcache_bsize);
	pr_info("icache_bsize      = 0x%x\n", icache_bsize);
	if (ucache_bsize != 0)
		pr_info("ucache_bsize      = 0x%x\n", ucache_bsize);

	pr_info("cpu_features      = 0x%016lx\n", cur_cpu_spec->cpu_features);
	pr_info("  possible        = 0x%016lx\n",
		(unsigned long)CPU_FTRS_POSSIBLE);
	pr_info("  always          = 0x%016lx\n",
		(unsigned long)CPU_FTRS_ALWAYS);
	pr_info("cpu_user_features = 0x%08x 0x%08x\n",
		cur_cpu_spec->cpu_user_features,
		cur_cpu_spec->cpu_user_features2);
	pr_info("mmu_features      = 0x%08x\n", cur_cpu_spec->mmu_features);
#ifdef CONFIG_PPC64
	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
#ifdef CONFIG_PPC_BOOK3S
	pr_info("vmalloc start     = 0x%lx\n", KERN_VIRT_START);
	pr_info("IO start          = 0x%lx\n", KERN_IO_START);
	pr_info("vmemmap start     = 0x%lx\n", (unsigned long)vmemmap);
#endif
#endif

	if (!early_radix_enabled())
		print_system_hash_info();

	if (PHYSICAL_START > 0)
		pr_info("physical_start    = 0x%llx\n",
			(unsigned long long)PHYSICAL_START);
	pr_info("-----------------------------------------------------\n");
}

#ifdef CONFIG_SMP
static void smp_setup_pacas(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		allocate_paca(cpu);
		set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
	}

	memblock_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32));
	cpu_to_phys_id = NULL;
}
#endif

/*
 * Called from start_kernel, this initializes memblock, which is used
 * to manage page allocation until mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	kasan_init();

	*cmdline_p = boot_command_line;

	/* Set a half-reasonable default so udelay does something sensible */
	loops_per_jiffy = 500000000 / HZ;
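	/*
	 * Worked example: with HZ=100 this seeds loops_per_jiffy at
	 * 5000000, i.e. udelay assumes roughly 500 million delay-loop
	 * iterations per second (1000 BogoMIPS) until the real value
	 * is calibrated.
	 */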

	/* Unflatten the device-tree passed by prom_init or kexec */
	unflatten_device_tree();

	/*
	 * Initialize cache line/block info from device-tree (on ppc64) or
	 * just cputable (on ppc32).
	 */
	initialize_cache_info();

	/* Initialize RTAS if available. */
	rtas_initialize();

	/* Check if we have an initrd provided via the device-tree. */
	check_for_initrd();

	/* Probe the machine type, establish ppc_md. */
	probe_machine();

	/* Setup panic notifier if requested by the platform. */
	setup_panic();

	/*
	 * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
	 * it from their respective probe() function).
	 */
	setup_power_save();

	/* Discover standard serial ports. */
	find_legacy_serial_ports();

	/* Register early console with the printk subsystem. */
	register_early_udbg_console();

	/* Setup the various CPU maps based on the device-tree. */
	smp_setup_cpu_maps();

	/* Initialize xmon. */
	xmon_setup();

	/* Check the SMT related command line arguments (ppc64). */
	check_smt_enabled();

	/* Parse memory topology */
	mem_topology_setup();

	/*
	 * Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids.
	 *
	 * Freescale Book3e parts spin in a loop provided by firmware,
	 * so smp_release_cpus() does nothing for them.
	 */
#ifdef CONFIG_SMP
	smp_setup_pacas();

	/* On BookE, setup per-core TLB data structures. */
	setup_tlb_core_data();
#endif

	/* Print various info about the machine that has been gathered so far. */
	print_system_info();

	/* Reserve large chunks of memory for use by CMA for KVM. */
	kvm_cma_reserve();

	/* Reserve large chunks of memory for use by CMA for hugetlb. */
	gigantic_hugetlb_cma_reserve();

	klp_init_thread_info(&init_task);

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;

	mm_iommu_init(&init_mm);
	irqstack_early_init();
	exc_lvl_early_init();
	emergency_stack_init();

	smp_release_cpus();

	initmem_init();

	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);

	if (ppc_md.setup_arch)
		ppc_md.setup_arch();

	setup_barrier_nospec();
	setup_spectre_v2();

	paging_init();

	/* Initialize the MMU context management stuff. */
	mmu_context_init();

	/* Interrupt code needs to be 64K-aligned. */
	if (IS_ENABLED(CONFIG_PPC64) && (unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
		      (unsigned long)_stext);
}