1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 2013 MundoReader S.L.
4*4882a593Smuzhiyun * Author: Heiko Stuebner <heiko@sntech.de>
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/delay.h>
8*4882a593Smuzhiyun #include <linux/init.h>
9*4882a593Smuzhiyun #include <linux/smp.h>
10*4882a593Smuzhiyun #include <linux/io.h>
11*4882a593Smuzhiyun #include <linux/of.h>
12*4882a593Smuzhiyun #include <linux/of_address.h>
13*4882a593Smuzhiyun #include <linux/regmap.h>
14*4882a593Smuzhiyun #include <linux/mfd/syscon.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #include <linux/reset.h>
17*4882a593Smuzhiyun #include <linux/cpu.h>
18*4882a593Smuzhiyun #include <asm/cacheflush.h>
19*4882a593Smuzhiyun #include <asm/cp15.h>
20*4882a593Smuzhiyun #include <asm/smp_scu.h>
21*4882a593Smuzhiyun #include <asm/smp_plat.h>
22*4882a593Smuzhiyun #include <asm/mach/map.h>
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include "core.h"
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun static void __iomem *scu_base_addr;
27*4882a593Smuzhiyun static void __iomem *sram_base_addr;
28*4882a593Smuzhiyun static int ncores;
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun #define PMU_PWRDN_CON 0x08
31*4882a593Smuzhiyun #define PMU_PWRDN_ST 0x0c
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun #define PMU_PWRDN_SCU 4
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun static struct regmap *pmu;
36*4882a593Smuzhiyun static int has_pmu = true;
37*4882a593Smuzhiyun
pmu_power_domain_is_on(int pd)38*4882a593Smuzhiyun static int pmu_power_domain_is_on(int pd)
39*4882a593Smuzhiyun {
40*4882a593Smuzhiyun u32 val;
41*4882a593Smuzhiyun int ret;
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun ret = regmap_read(pmu, PMU_PWRDN_ST, &val);
44*4882a593Smuzhiyun if (ret < 0)
45*4882a593Smuzhiyun return ret;
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun return !(val & BIT(pd));
48*4882a593Smuzhiyun }
49*4882a593Smuzhiyun
rockchip_get_core_reset(int cpu)50*4882a593Smuzhiyun static struct reset_control *rockchip_get_core_reset(int cpu)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun struct device *dev = get_cpu_device(cpu);
53*4882a593Smuzhiyun struct device_node *np;
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /* The cpu device is only available after the initial core bringup */
56*4882a593Smuzhiyun if (dev)
57*4882a593Smuzhiyun np = dev->of_node;
58*4882a593Smuzhiyun else
59*4882a593Smuzhiyun np = of_get_cpu_node(cpu, NULL);
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun return of_reset_control_get_exclusive(np, NULL);
62*4882a593Smuzhiyun }
63*4882a593Smuzhiyun
/*
 * pmu_set_power_domain - switch a core's power domain on or off
 * @pd: power-domain / core index
 * @on: true to power up, false to power down
 *
 * Asserts the core's soft reset before powering down, updates the PMU
 * power-down control register, then busy-waits (no timeout) until the
 * PMU status register reflects the requested state. Returns 0 on
 * success or a negative errno.
 *
 * Fix vs. previous version: the reset control obtained here was leaked
 * on the error returns inside the has_pmu branch; all exits now go
 * through a common cleanup that does reset_control_put().
 */
static int pmu_set_power_domain(int pd, bool on)
{
	u32 val = (on) ? 0 : BIT(pd);
	struct reset_control *rstc = rockchip_get_core_reset(pd);
	int ret;

	/* Only Cortex-A9 parts may legitimately lack a per-core reset. */
	if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
		pr_err("%s: could not get reset control for core %d\n",
		       __func__, pd);
		return PTR_ERR(rstc);
	}

	/*
	 * We need to soft reset the cpu when we turn off the cpu power domain,
	 * or else the active processors might be stalled when the individual
	 * processor is powered down.
	 */
	if (!IS_ERR(rstc) && !on)
		reset_control_assert(rstc);

	if (has_pmu) {
		ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
		if (ret < 0) {
			pr_err("%s: could not update power domain\n",
			       __func__);
			goto out_put_reset;
		}

		/* poll until the status register matches the request */
		ret = -1;
		while (ret != on) {
			ret = pmu_power_domain_is_on(pd);
			if (ret < 0) {
				pr_err("%s: could not read power domain state\n",
				       __func__);
				goto out_put_reset;
			}
		}
	}

	if (!IS_ERR(rstc)) {
		if (on)
			reset_control_deassert(rstc);
		reset_control_put(rstc);
	}

	return 0;

out_put_reset:
	/* error path: release the reset reference (left asserted if !on) */
	if (!IS_ERR(rstc))
		reset_control_put(rstc);
	return ret;
}
111*4882a593Smuzhiyun
112*4882a593Smuzhiyun /*
113*4882a593Smuzhiyun * Handling of CPU cores
114*4882a593Smuzhiyun */
115*4882a593Smuzhiyun
/*
 * rockchip_boot_secondary - bring a secondary cpu core online
 * @cpu: logical cpu number to boot
 * @idle: idle task for the incoming cpu (unused here)
 *
 * Powers up the core's power domain; on non-Cortex-A9 parts it then
 * signals the bootrom mailbox in sram so the core leaves its wfe loop.
 * Returns 0 on success or a negative errno.
 */
static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/* both boot paths need the sram mapping; the pmu only when present */
	if (!sram_base_addr || (has_pmu && !pmu)) {
		pr_err("%s: sram or pmu missing for cpu boot\n", __func__);
		return -ENXIO;
	}

	/* ncores was determined from the SCU / L2CTLR in prepare_cpus */
	if (cpu >= ncores) {
		pr_err("%s: cpu %d outside maximum number of cpus %d\n",
		       __func__, cpu, ncores);
		return -ENXIO;
	}

	/* start the core */
	ret = pmu_set_power_domain(0 + cpu, true);
	if (ret < 0)
		return ret;

	if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
		/*
		 * We communicate with the bootrom to active the cpus other
		 * than cpu0, after a blob of initialize code, they will
		 * stay at wfe state, once they are actived, they will check
		 * the mailbox:
		 * sram_base_addr + 4: 0xdeadbeaf
		 * sram_base_addr + 8: start address for pc
		 * The cpu0 need to wait the other cpus other than cpu0 entering
		 * the wfe state.The wait time is affected by many aspects.
		 * (e.g: cpu frequency, bootrom frequency, sram frequency, ...)
		 */
		mdelay(1); /* ensure the cpus other than cpu0 to startup */

		/* write the entry point before the magic marker the core polls */
		writel(__pa_symbol(secondary_startup), sram_base_addr + 8);
		writel(0xDEADBEAF, sram_base_addr + 4);
		dsb_sev();
	}

	return 0;
}
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun /**
159*4882a593Smuzhiyun * rockchip_smp_prepare_sram - populate necessary sram block
160*4882a593Smuzhiyun * Starting cores execute the code residing at the start of the on-chip sram
161*4882a593Smuzhiyun * after power-on. Therefore make sure, this sram region is reserved and
162*4882a593Smuzhiyun * big enough. After this check, copy the trampoline code that directs the
163*4882a593Smuzhiyun * core to the real startup code in ram into the sram-region.
164*4882a593Smuzhiyun * @node: mmio-sram device node
165*4882a593Smuzhiyun */
rockchip_smp_prepare_sram(struct device_node * node)166*4882a593Smuzhiyun static int __init rockchip_smp_prepare_sram(struct device_node *node)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun unsigned int trampoline_sz = &rockchip_secondary_trampoline_end -
169*4882a593Smuzhiyun &rockchip_secondary_trampoline;
170*4882a593Smuzhiyun struct resource res;
171*4882a593Smuzhiyun unsigned int rsize;
172*4882a593Smuzhiyun int ret;
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun ret = of_address_to_resource(node, 0, &res);
175*4882a593Smuzhiyun if (ret < 0) {
176*4882a593Smuzhiyun pr_err("%s: could not get address for node %pOF\n",
177*4882a593Smuzhiyun __func__, node);
178*4882a593Smuzhiyun return ret;
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun rsize = resource_size(&res);
182*4882a593Smuzhiyun if (rsize < trampoline_sz) {
183*4882a593Smuzhiyun pr_err("%s: reserved block with size 0x%x is too small for trampoline size 0x%x\n",
184*4882a593Smuzhiyun __func__, rsize, trampoline_sz);
185*4882a593Smuzhiyun return -EINVAL;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun /* set the boot function for the sram code */
189*4882a593Smuzhiyun rockchip_boot_fn = __pa_symbol(secondary_startup);
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun /* copy the trampoline to sram, that runs during startup of the core */
192*4882a593Smuzhiyun memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
193*4882a593Smuzhiyun flush_cache_all();
194*4882a593Smuzhiyun outer_clean_range(0, trampoline_sz);
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun dsb_sev();
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun return 0;
199*4882a593Smuzhiyun }
200*4882a593Smuzhiyun
/* regmap layout for the fallback MMIO mapping of the 32-bit PMU registers */
static const struct regmap_config rockchip_pmu_regmap_config = {
	.name = "rockchip-pmu",
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};
207*4882a593Smuzhiyun
rockchip_smp_prepare_pmu(void)208*4882a593Smuzhiyun static int __init rockchip_smp_prepare_pmu(void)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun struct device_node *node;
211*4882a593Smuzhiyun void __iomem *pmu_base;
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun /*
214*4882a593Smuzhiyun * This function is only called via smp_ops->smp_prepare_cpu().
215*4882a593Smuzhiyun * That only happens if a "/cpus" device tree node exists
216*4882a593Smuzhiyun * and has an "enable-method" property that selects the SMP
217*4882a593Smuzhiyun * operations defined herein.
218*4882a593Smuzhiyun */
219*4882a593Smuzhiyun node = of_find_node_by_path("/cpus");
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun pmu = syscon_regmap_lookup_by_phandle(node, "rockchip,pmu");
222*4882a593Smuzhiyun of_node_put(node);
223*4882a593Smuzhiyun if (!IS_ERR(pmu))
224*4882a593Smuzhiyun return 0;
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun pmu = syscon_regmap_lookup_by_compatible("rockchip,rk3066-pmu");
227*4882a593Smuzhiyun if (!IS_ERR(pmu))
228*4882a593Smuzhiyun return 0;
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun /* fallback, create our own regmap for the pmu area */
231*4882a593Smuzhiyun pmu = NULL;
232*4882a593Smuzhiyun node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu");
233*4882a593Smuzhiyun if (!node) {
234*4882a593Smuzhiyun pr_err("%s: could not find pmu dt node\n", __func__);
235*4882a593Smuzhiyun return -ENODEV;
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun pmu_base = of_iomap(node, 0);
239*4882a593Smuzhiyun of_node_put(node);
240*4882a593Smuzhiyun if (!pmu_base) {
241*4882a593Smuzhiyun pr_err("%s: could not map pmu registers\n", __func__);
242*4882a593Smuzhiyun return -ENOMEM;
243*4882a593Smuzhiyun }
244*4882a593Smuzhiyun
245*4882a593Smuzhiyun pmu = regmap_init_mmio(NULL, pmu_base, &rockchip_pmu_regmap_config);
246*4882a593Smuzhiyun if (IS_ERR(pmu)) {
247*4882a593Smuzhiyun int ret = PTR_ERR(pmu);
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun iounmap(pmu_base);
250*4882a593Smuzhiyun pmu = NULL;
251*4882a593Smuzhiyun pr_err("%s: regmap init failed\n", __func__);
252*4882a593Smuzhiyun return ret;
253*4882a593Smuzhiyun }
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun return 0;
256*4882a593Smuzhiyun }
257*4882a593Smuzhiyun
rockchip_smp_prepare_cpus(unsigned int max_cpus)258*4882a593Smuzhiyun static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
259*4882a593Smuzhiyun {
260*4882a593Smuzhiyun struct device_node *node;
261*4882a593Smuzhiyun unsigned int i;
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-smp-sram");
264*4882a593Smuzhiyun if (!node) {
265*4882a593Smuzhiyun pr_err("%s: could not find sram dt node\n", __func__);
266*4882a593Smuzhiyun return;
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun sram_base_addr = of_iomap(node, 0);
270*4882a593Smuzhiyun if (!sram_base_addr) {
271*4882a593Smuzhiyun pr_err("%s: could not map sram registers\n", __func__);
272*4882a593Smuzhiyun of_node_put(node);
273*4882a593Smuzhiyun return;
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun if (has_pmu && rockchip_smp_prepare_pmu()) {
277*4882a593Smuzhiyun of_node_put(node);
278*4882a593Smuzhiyun return;
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
282*4882a593Smuzhiyun if (rockchip_smp_prepare_sram(node)) {
283*4882a593Smuzhiyun of_node_put(node);
284*4882a593Smuzhiyun return;
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun /* enable the SCU power domain */
288*4882a593Smuzhiyun pmu_set_power_domain(PMU_PWRDN_SCU, true);
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun of_node_put(node);
291*4882a593Smuzhiyun node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
292*4882a593Smuzhiyun if (!node) {
293*4882a593Smuzhiyun pr_err("%s: missing scu\n", __func__);
294*4882a593Smuzhiyun return;
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun scu_base_addr = of_iomap(node, 0);
298*4882a593Smuzhiyun if (!scu_base_addr) {
299*4882a593Smuzhiyun pr_err("%s: could not map scu registers\n", __func__);
300*4882a593Smuzhiyun of_node_put(node);
301*4882a593Smuzhiyun return;
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun
304*4882a593Smuzhiyun /*
305*4882a593Smuzhiyun * While the number of cpus is gathered from dt, also get the
306*4882a593Smuzhiyun * number of cores from the scu to verify this value when
307*4882a593Smuzhiyun * booting the cores.
308*4882a593Smuzhiyun */
309*4882a593Smuzhiyun ncores = scu_get_core_count(scu_base_addr);
310*4882a593Smuzhiyun pr_err("%s: ncores %d\n", __func__, ncores);
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun scu_enable(scu_base_addr);
313*4882a593Smuzhiyun } else {
314*4882a593Smuzhiyun unsigned int l2ctlr;
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun asm ("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
317*4882a593Smuzhiyun ncores = ((l2ctlr >> 24) & 0x3) + 1;
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun of_node_put(node);
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun /* Make sure that all cores except the first are really off */
322*4882a593Smuzhiyun for (i = 1; i < ncores; i++)
323*4882a593Smuzhiyun pmu_set_power_domain(0 + i, false);
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun
/*
 * rk3036 variant of the prepare hook: disable all PMU power-domain
 * handling (see the has_pmu checks) before running the common path.
 */
static void __init rk3036_smp_prepare_cpus(unsigned int max_cpus)
{
	has_pmu = false;

	rockchip_smp_prepare_cpus(max_cpus);
}
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun #ifdef CONFIG_HOTPLUG_CPU
/*
 * cpu_kill callback: runs on a surviving cpu to cut power to @cpu
 * after it has gone offline. Always reports success (returns 1).
 */
static int rockchip_cpu_kill(unsigned int cpu)
{
	/*
	 * We need a delay here to ensure that the dying CPU can finish
	 * executing v7_coherency_exit() and reach the WFI/WFE state
	 * prior to having the power domain disabled.
	 */
	mdelay(1);

	pmu_set_power_domain(0 + cpu, false);
	return 1;
}
346*4882a593Smuzhiyun
/*
 * cpu_die callback: runs on the dying cpu itself. Leaves coherency,
 * flushes local caches, then idles forever until rockchip_cpu_kill()
 * removes power. Never returns.
 */
static void rockchip_cpu_die(unsigned int cpu)
{
	v7_exit_coherency_flush(louis);
	while (1)
		cpu_do_idle();
}
353*4882a593Smuzhiyun #endif
354*4882a593Smuzhiyun
/* SMP operations for rk3036 (no PMU power domains) */
static const struct smp_operations rk3036_smp_ops __initconst = {
	.smp_prepare_cpus	= rk3036_smp_prepare_cpus,
	.smp_boot_secondary	= rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= rockchip_cpu_kill,
	.cpu_die		= rockchip_cpu_die,
#endif
};
363*4882a593Smuzhiyun
/* SMP operations for the PMU-equipped Rockchip parts (rk3066 family) */
static const struct smp_operations rockchip_smp_ops __initconst = {
	.smp_prepare_cpus	= rockchip_smp_prepare_cpus,
	.smp_boot_secondary	= rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= rockchip_cpu_kill,
	.cpu_die		= rockchip_cpu_die,
#endif
};
372*4882a593Smuzhiyun
/* matched against the "enable-method" property of the /cpus node */
CPU_METHOD_OF_DECLARE(rk3036_smp, "rockchip,rk3036-smp", &rk3036_smp_ops);
CPU_METHOD_OF_DECLARE(rk3066_smp, "rockchip,rk3066-smp", &rockchip_smp_ops);
375