xref: /OK3568_Linux_fs/kernel/arch/arm/mach-shmobile/platsmp-apmu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * SMP support for SoCs with APMU
 *
 * Copyright (C) 2014  Renesas Electronics Corporation
 * Copyright (C) 2013  Magnus Damm
 */
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/threads.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include "common.h"
#include "rcar-gen2.h"

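/* Per-CPU APMU state: the mapped register block and this CPU's bit within it */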
static struct {
	void __iomem *iomem;
	int bit;
} apmu_cpus[NR_CPUS];

#define WUPCR_OFFS	 0x10		/* Wake Up Control Register */
#define PSTR_OFFS	 0x40		/* Power Status Register */
#define CPUNCR_OFFS(n)	(0x100 + (0x10 * (n)))
					/* CPUn Power Status Control Register */
#define DBGRCR_OFFS	0x180		/* Debug Resource Reset Control Reg. */

/* Power Status Register */
#define CPUNST(r, n)	(((r) >> (n * 4)) & 3)	/* CPUn Status Bit */
#define CPUST_RUN	0		/* Run Mode */
#define CPUST_STANDBY	3		/* CoreStandby Mode */

/* Debug Resource Reset Control Register */
#define DBGCPUREN	BIT(24)		/* CPU Other Reset Request Enable */
#define DBGCPUNREN(n)	BIT((n) + 20)	/* CPUn Reset Request Enable */
#define DBGCPUPREN	BIT(19)		/* CPU Peripheral Reset Req. Enable */

static int __maybe_unused apmu_power_on(void __iomem *p, int bit)
{
	/* request power on */
	writel_relaxed(BIT(bit), p + WUPCR_OFFS);

	/* wait for APMU to finish */
	while (readl_relaxed(p + WUPCR_OFFS) != 0)
		;

	return 0;
}

static int __maybe_unused apmu_power_off(void __iomem *p, int bit)
{
	/* request Core Standby for next WFI */
	writel_relaxed(3, p + CPUNCR_OFFS(bit));
	return 0;
}

static int __maybe_unused apmu_power_off_poll(void __iomem *p, int bit)
{
	int k;

	for (k = 0; k < 1000; k++) {
		if (CPUNST(readl_relaxed(p + PSTR_OFFS), bit) == CPUST_STANDBY)
			return 1;

		mdelay(1);
	}

	return 0;
}

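/* Call @fn on this CPU's APMU block, or return -EINVAL if none was mapped */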
static int __maybe_unused apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu))
{
	void __iomem *p = apmu_cpus[cpu].iomem;

	return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL;
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_SUSPEND)
/* nicked from arch/arm/mach-exynos/hotplug.c */
static inline void cpu_enter_lowpower_a15(void)
{
	unsigned int v;

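	/* Clear the SCTLR C bit to turn off the D-cache before flushing it */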
	asm volatile(
	"       mrc     p15, 0, %0, c1, c0, 0\n"
	"       bic     %0, %0, %1\n"
	"       mcr     p15, 0, %0, c1, c0, 0\n"
		: "=&r" (v)
		: "Ir" (CR_C)
		: "cc");

	flush_cache_louis();

	asm volatile(
	/*
	 * Turn off coherency
	 */
	"       mrc     p15, 0, %0, c1, c0, 1\n"
	"       bic     %0, %0, %1\n"
	"       mcr     p15, 0, %0, c1, c0, 1\n"
		: "=&r" (v)
		: "Ir" (0x40)
		: "cc");

	isb();
	dsb();
}

static void shmobile_smp_apmu_cpu_shutdown(unsigned int cpu)
{
	/* Select next sleep mode using the APMU */
	apmu_wrap(cpu, apmu_power_off);

	/* Do ARM specific CPU shutdown */
	cpu_enter_lowpower_a15();
}
#endif

#if defined(CONFIG_HOTPLUG_CPU)
static void shmobile_smp_apmu_cpu_die(unsigned int cpu)
{
	/* For this particular CPU deregister boot vector */
	shmobile_smp_hook(cpu, 0, 0);

	/* Shutdown CPU core */
	shmobile_smp_apmu_cpu_shutdown(cpu);

	/* jump to shared mach-shmobile sleep / reset code */
	shmobile_smp_sleep();
}

static int shmobile_smp_apmu_cpu_kill(unsigned int cpu)
{
	return apmu_wrap(cpu, apmu_power_off_poll);
}
#endif

#if defined(CONFIG_SUSPEND)
static int shmobile_smp_apmu_do_suspend(unsigned long cpu)
{
	shmobile_smp_hook(cpu, __pa_symbol(cpu_resume), 0);
	shmobile_smp_apmu_cpu_shutdown(cpu);
	cpu_do_idle(); /* WFI selects Core Standby */
	return 1;
}

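/*
 * Undo cpu_enter_lowpower_a15(): turn the D-cache (SCTLR.C) and
 * coherency (bit 0x40 in the auxiliary control register) back on.
 */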
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile("mrc    p15, 0, %0, c1, c0, 0\n"
		     "       orr     %0, %0, %1\n"
		     "       mcr     p15, 0, %0, c1, c0, 0\n"
		     "       mrc     p15, 0, %0, c1, c0, 1\n"
		     "       orr     %0, %0, %2\n"
		     "       mcr     p15, 0, %0, c1, c0, 1\n"
		     : "=&r" (v)
		     : "Ir" (CR_C), "Ir" (0x40)
		     : "cc");
}

static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
{
	cpu_suspend(smp_processor_id(), shmobile_smp_apmu_do_suspend);
	cpu_leave_lowpower();
	return 0;
}

void __init shmobile_smp_apmu_suspend_init(void)
{
	shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
}
#endif

#ifdef CONFIG_SMP
static void apmu_init_cpu(struct resource *res, int cpu, int bit)
{
	u32 x;

	if ((cpu >= ARRAY_SIZE(apmu_cpus)) || apmu_cpus[cpu].iomem)
		return;

	apmu_cpus[cpu].iomem = ioremap(res->start, resource_size(res));
	apmu_cpus[cpu].bit = bit;

	pr_debug("apmu ioremap %d %d %pr\n", cpu, bit, res);

	/* Setup for debug mode */
	x = readl(apmu_cpus[cpu].iomem + DBGRCR_OFFS);
	x |= DBGCPUREN | DBGCPUNREN(bit) | DBGCPUPREN;
	writel(x, apmu_cpus[cpu].iomem + DBGRCR_OFFS);
}

static const struct of_device_id apmu_ids[] = {
	{ .compatible = "renesas,apmu" },
	{ /*sentinel*/ }
};

static void apmu_parse_dt(void (*fn)(struct resource *res, int cpu, int bit))
{
	struct device_node *np_apmu, *np_cpu;
	struct resource res;
	int bit, index;
	u32 id;

	for_each_matching_node(np_apmu, apmu_ids) {
		/* only enable the cluster that includes the boot CPU */
		bool is_allowed = false;

		for (bit = 0; bit < CONFIG_NR_CPUS; bit++) {
			np_cpu = of_parse_phandle(np_apmu, "cpus", bit);
			if (np_cpu) {
				if (!of_property_read_u32(np_cpu, "reg", &id)) {
					if (id == cpu_logical_map(0)) {
						is_allowed = true;
						of_node_put(np_cpu);
						break;
					}
				}
				of_node_put(np_cpu);
			}
		}
		if (!is_allowed)
			continue;

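		/* hand each CPU of this cluster, plus the APMU resource, to @fn */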
		for (bit = 0; bit < CONFIG_NR_CPUS; bit++) {
			np_cpu = of_parse_phandle(np_apmu, "cpus", bit);
			if (np_cpu) {
				if (!of_property_read_u32(np_cpu, "reg", &id)) {
					index = get_logical_index(id);
					if ((index >= 0) &&
					    !of_address_to_resource(np_apmu,
								    0, &res))
						fn(&res, index, bit);
				}
				of_node_put(np_cpu);
			}
		}
	}
}

static void __init shmobile_smp_apmu_setup_boot(void)
{
	/* install boot code shared by all CPUs */
	shmobile_boot_fn = __pa_symbol(shmobile_smp_boot);
	shmobile_boot_fn_gen2 = shmobile_boot_fn;
}

static int shmobile_smp_apmu_boot_secondary(unsigned int cpu,
					    struct task_struct *idle)
{
	/* For this particular CPU register boot vector */
	shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_apmu), 0);

	return apmu_wrap(cpu, apmu_power_on);
}

static void __init shmobile_smp_apmu_prepare_cpus_dt(unsigned int max_cpus)
{
	shmobile_smp_apmu_setup_boot();
	apmu_parse_dt(apmu_init_cpu);
	rcar_gen2_pm_init();
}

static struct smp_operations apmu_smp_ops __initdata = {
	.smp_prepare_cpus	= shmobile_smp_apmu_prepare_cpus_dt,
	.smp_boot_secondary	= shmobile_smp_apmu_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
	.cpu_die		= shmobile_smp_apmu_cpu_die,
	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
#endif
};

CPU_METHOD_OF_DECLARE(shmobile_smp_apmu, "renesas,apmu", &apmu_smp_ops);
#endif /* CONFIG_SMP */