// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
//		http://www.samsung.com
//
// Cloned from linux/arch/arm/mach-vexpress/platsmp.c
//
// Copyright (C) 2002 ARM Ltd.
// All Rights Reserved

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include "common.h"

extern void exynos4_secondary_startup(void);

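/*
 * exynos_pen_release holds the hardware core ID of the CPU currently being
 * released from the holding pen, or -1 when no release is pending. It is
 * written by exynos_boot_secondary() and cleared again by the woken CPU in
 * exynos_secondary_init().
 */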
/* XXX exynos_pen_release is cargo culted code - DO NOT COPY XXX */
volatile int exynos_pen_release = -1;

#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

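	/*
	 * Undo what v7_exit_coherency_flush() did in exynos_cpu_die(): set
	 * SCTLR.C (CR_C) to re-enable the data cache and set the ACTLR SMP
	 * bit (bit 6, 0x40) so the core rejoins SMP coherency.
	 */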
	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}

static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		wfi();

		if (exynos_pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 *
 */
void exynos_cpu_power_down(int cpu)
{
	u32 core_conf;

	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}

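	/*
	 * Clear the local power-enable bit for this core in the PMU; the
	 * core is actually powered off on its next WFI (see
	 * platform_do_lowpower()).
	 */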
	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
	core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	u32 core_conf = S5P_CORE_LOCAL_PWR_EN;

	if (soc_is_exynos3250())
		core_conf |= S5P_CORE_AUTOWAKEUP_EN;

	pmu_raw_writel(core_conf,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_state : returns the power state of the cpu
 * @cpu : the cpu to retrieve the power state from
 *
 */
int exynos_cpu_power_state(int cpu)
{
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
		S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_state : returns the power state of the cluster
 * @cluster : the cluster to retrieve the power state from
 *
 */
int exynos_cluster_power_state(int cluster)
{
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_scu_enable : enables SCU for Cortex-A9 based system
 */
void exynos_scu_enable(void)
{
	struct device_node *np;
	static void __iomem *scu_base;

	if (!scu_base) {
		np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
		if (np) {
			scu_base = of_iomap(np, 0);
			of_node_put(np);
		} else {
			scu_base = ioremap(scu_a9_get_base(), SZ_4K);
		}
	}
	scu_enable(scu_base);
}

static void __iomem *cpu_boot_reg_base(void)
{
	if (soc_is_exynos4210() && exynos_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM5;
	return sysram_base_addr;
}

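/*
 * Per-SoC layout of the secondary boot-address slot(s): Exynos4412 keeps one
 * 4-byte slot per CPU, Exynos5420/5800 use a single slot at offset 4, and all
 * other supported SoCs use the base register directly.
 */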
static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *boot_reg;

	boot_reg = cpu_boot_reg_base();
	if (!boot_reg)
		return IOMEM_ERR_PTR(-ENODEV);
	if (soc_is_exynos4412())
		boot_reg += 4*cpu;
	else if (soc_is_exynos5420() || soc_is_exynos5800())
		boot_reg += 4;
	return boot_reg;
}

/*
 * Set wake up by local power mode and execute software reset for given core.
 *
 * Currently this is needed only when booting secondary CPU on Exynos3250.
 */
void exynos_core_restart(u32 core_id)
{
	unsigned int timeout = 16;
	u32 val;

	if (!of_machine_is_compatible("samsung,exynos3250"))
		return;

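	/*
	 * Poll S5P_PMU_SPARE2 for up to 16 * 10us; the software reset below
	 * is only issued once it reads non-zero.
	 */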
	while (timeout && !pmu_raw_readl(S5P_PMU_SPARE2)) {
		timeout--;
		udelay(10);
	}
	if (timeout == 0) {
		pr_err("cpu core %u restart failed\n", core_id);
		return;
	}
	udelay(10);

	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}

/*
 * XXX CARGO CULTED CODE - DO NOT COPY XXX
 *
 * Write exynos_pen_release in a way that is guaranteed to be visible to
 * all observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void exynos_write_pen_release(int val)
{
	exynos_pen_release = val;
	smp_wmb();
	sync_cache_w(&exynos_pen_release);
}

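/*
 * Serialises the boot CPU's wake-up sequence in exynos_boot_secondary()
 * against the secondary CPU's exit through exynos_secondary_init().
 */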
static DEFINE_SPINLOCK(boot_lock);

static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	exynos_write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
{
	int ret;

	/*
	 * Try to set boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
	ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
		writel_relaxed(boot_addr, boot_reg);
		ret = 0;
	}
fail:
	return ret;
}

int exynos_get_boot_addr(u32 core_id, unsigned long *boot_addr)
{
	int ret;

	/*
	 * Try to get boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
	ret = call_firmware_op(get_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
		*boot_addr = readl_relaxed(boot_reg);
		ret = 0;
	}
fail:
	return ret;
}

static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting exynos_pen_release.
	 *
	 * Note that "exynos_pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	exynos_write_pen_release(core_id);

	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout == 0)
				break;
			timeout--;
			mdelay(1);
		}

		if (timeout == 0) {
			pr_err("cpu1 power enable failed\n");
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}

	exynos_core_restart(core_id);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */

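	/*
	 * Keep reprogramming the boot address and kicking the secondary for
	 * up to one second, until it signals that it is up by clearing
	 * exynos_pen_release in exynos_secondary_init().
	 */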
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		unsigned long boot_addr;

		smp_rmb();

		boot_addr = __pa_symbol(exynos4_secondary_startup);

		ret = exynos_set_boot_addr(core_id, boot_addr);
		if (ret)
			goto fail;

		call_firmware_op(cpu_boot, core_id);

		if (soc_is_exynos3250())
			dsb_sev();
		else
			arch_send_wakeup_ipi_mask(cpumask_of(cpu));

		if (exynos_pen_release == -1)
			break;

		udelay(10);
	}

	if (exynos_pen_release != -1)
		ret = -ETIMEDOUT;

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
fail:
	spin_unlock(&boot_lock);

	return exynos_pen_release != -1 ? ret : 0;
}

static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	exynos_sysram_init();

	exynos_set_delayed_reset_assertion(true);

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		exynos_scu_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

const struct smp_operations exynos_smp_ops __initconst = {
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};