xref: /OK3568_Linux_fs/kernel/arch/arm/mach-zynq/platsmp.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * This file contains Xilinx specific SMP code, used to start up
4*4882a593Smuzhiyun  * the second processor.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (C) 2011-2013 Xilinx
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * based on linux/arch/arm/mach-realview/platsmp.c
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * Copyright (C) 2002 ARM Ltd.
11*4882a593Smuzhiyun  */
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/export.h>
14*4882a593Smuzhiyun #include <linux/jiffies.h>
15*4882a593Smuzhiyun #include <linux/init.h>
16*4882a593Smuzhiyun #include <linux/io.h>
17*4882a593Smuzhiyun #include <asm/cacheflush.h>
18*4882a593Smuzhiyun #include <asm/smp_plat.h>
19*4882a593Smuzhiyun #include <asm/smp_scu.h>
20*4882a593Smuzhiyun #include <linux/irqchip/arm-gic.h>
21*4882a593Smuzhiyun #include "common.h"
22*4882a593Smuzhiyun 
/*
 * Cached number of cores in the system.
 * scu_get_core_count() must be called from __init code, so it cannot
 * be used from zynq_cpun_start(), which is not in an __init section;
 * the core count is therefore stored here at init time.
 */
28*4882a593Smuzhiyun static int ncores;
29*4882a593Smuzhiyun 
/**
 * zynq_cpun_start - Kick a secondary CPU and make it jump to @address
 * @address:	Physical address the CPU should start executing at. May be 0,
 *		in which case the CPU is released without installing a jump
 *		trampoline at address 0x0.
 * @cpu:	Logical CPU number to start (mapped to the physical CPU id
 *		via cpu_logical_map()).
 *
 * Stops the CPU through the SLCR, optionally installs a small trampoline
 * at physical address 0x0 that jumps to @address, then releases the CPU.
 *
 * Return: 0 on success, -1 if @address is invalid or the boot vectors
 * could not be mapped.
 */
int zynq_cpun_start(u32 address, int cpu)
{
	/*
	 * Size of the whole trampoline, derived from linker/assembly
	 * symbols (declared in common.h — presumably defined in the
	 * platform's SMP head assembly).
	 */
	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
						&zynq_secondary_trampoline;
	u32 phy_cpuid = cpu_logical_map(cpu);

	/* MS: Expectation that SLCR are directly map and accessible */
	/*
	 * Not possible to jump to a non-aligned address; also reject a
	 * non-zero @address that would overlap the trampoline region
	 * at 0x0 (the trampoline would overwrite the target code).
	 */
	if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
		/* Store pointer to ioremap area which points to address 0x0 */
		static u8 __iomem *zero;
		/* Offset of the jump-target word within the trampoline */
		u32 trampoline_size = &zynq_secondary_trampoline_jump -
						&zynq_secondary_trampoline;

		zynq_slcr_cpu_stop(phy_cpuid);
		if (address) {
			/*
			 * If the kernel is not loaded at physical 0x0,
			 * address 0x0 must be ioremapped; otherwise it is
			 * already covered by the linear map at PAGE_OFFSET.
			 */
			if (__pa(PAGE_OFFSET)) {
				zero = ioremap(0, trampoline_code_size);
				if (!zero) {
					pr_warn("BOOTUP jump vectors not accessible\n");
					return -1;
				}
			} else {
				zero = (__force u8 __iomem *)PAGE_OFFSET;
			}

			/*
			 * This is an elegant way to jump to any address:
			 * 0x0: Load address at 0x8 to r0
			 * 0x4: Jump by mov instruction
			 * 0x8: Jumping address
			 */
			memcpy_toio(zero, &zynq_secondary_trampoline,
							trampoline_size);
			writel(address, zero + trampoline_size);

			/* Make the trampoline visible to the other CPU */
			flush_cache_all();
			outer_flush_range(0, trampoline_code_size);
			smp_wmb();

			if (__pa(PAGE_OFFSET))
				iounmap(zero);
		}
		zynq_slcr_cpu_start(phy_cpuid);

		return 0;
	}

	pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);

	return -1;
}
EXPORT_SYMBOL(zynq_cpun_start);
83*4882a593Smuzhiyun 
zynq_boot_secondary(unsigned int cpu,struct task_struct * idle)84*4882a593Smuzhiyun static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun 	return zynq_cpun_start(__pa_symbol(secondary_startup_arm), cpu);
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun /*
90*4882a593Smuzhiyun  * Initialise the CPU possible map early - this describes the CPUs
91*4882a593Smuzhiyun  * which may be present or become present in the system.
92*4882a593Smuzhiyun  */
zynq_smp_init_cpus(void)93*4882a593Smuzhiyun static void __init zynq_smp_init_cpus(void)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun 	int i;
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	ncores = scu_get_core_count(zynq_scu_base);
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	for (i = 0; i < ncores && i < CONFIG_NR_CPUS; i++)
100*4882a593Smuzhiyun 		set_cpu_possible(i, true);
101*4882a593Smuzhiyun }
102*4882a593Smuzhiyun 
/*
 * Enable the Snoop Control Unit before any secondary CPU is brought up.
 * @max_cpus is unused; the SCU is enabled regardless of how many CPUs
 * will actually be booted.
 */
static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(zynq_scu_base);
}
107*4882a593Smuzhiyun 
/**
 * zynq_secondary_init - Initialize secondary CPU cores
 * @cpu:	CPU that is initialized
 *
 * This function is in the hotplug path. Don't move it into the
 * init section!!
 */
static void zynq_secondary_init(unsigned int cpu)
{
	/* Per-core power-management setup hook (defined in common code) */
	zynq_core_pm_init();
}
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun #ifdef CONFIG_HOTPLUG_CPU
zynq_cpu_kill(unsigned cpu)121*4882a593Smuzhiyun static int zynq_cpu_kill(unsigned cpu)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun 	unsigned long timeout = jiffies + msecs_to_jiffies(50);
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	while (zynq_slcr_cpu_state_read(cpu))
126*4882a593Smuzhiyun 		if (time_after(jiffies, timeout))
127*4882a593Smuzhiyun 			return 0;
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	zynq_slcr_cpu_stop(cpu);
130*4882a593Smuzhiyun 	return 1;
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun /**
134*4882a593Smuzhiyun  * zynq_cpu_die - Let a CPU core die
135*4882a593Smuzhiyun  * @cpu:	Dying CPU
136*4882a593Smuzhiyun  *
137*4882a593Smuzhiyun  * Platform-specific code to shutdown a CPU.
138*4882a593Smuzhiyun  * Called with IRQs disabled on the dying CPU.
139*4882a593Smuzhiyun  */
zynq_cpu_die(unsigned int cpu)140*4882a593Smuzhiyun static void zynq_cpu_die(unsigned int cpu)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun 	zynq_slcr_cpu_state_write(cpu, true);
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	/*
145*4882a593Smuzhiyun 	 * there is no power-control hardware on this platform, so all
146*4882a593Smuzhiyun 	 * we can do is put the core into WFI; this is safe as the calling
147*4882a593Smuzhiyun 	 * code will have already disabled interrupts
148*4882a593Smuzhiyun 	 */
149*4882a593Smuzhiyun 	for (;;)
150*4882a593Smuzhiyun 		cpu_do_idle();
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun #endif
153*4882a593Smuzhiyun 
/* SMP operations table registered with the ARM core SMP code. */
const struct smp_operations zynq_smp_ops __initconst = {
	.smp_init_cpus		= zynq_smp_init_cpus,
	.smp_prepare_cpus	= zynq_smp_prepare_cpus,
	.smp_boot_secondary	= zynq_boot_secondary,
	.smp_secondary_init	= zynq_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= zynq_cpu_die,
	.cpu_kill		= zynq_cpu_kill,
#endif
};
164