xref: /OK3568_Linux_fs/kernel/arch/arm/plat-versatile/hotplug.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  Copyright (C) 2002 ARM Ltd.
4*4882a593Smuzhiyun  *  All Rights Reserved
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * This hotplug implementation is _specific_ to the situation found on
7*4882a593Smuzhiyun  * ARM development platforms where there is _no_ possibility of actually
8*4882a593Smuzhiyun  * taking a CPU offline, resetting it, or otherwise.  Real platforms must
9*4882a593Smuzhiyun  * NOT copy this code.
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun #include <linux/kernel.h>
12*4882a593Smuzhiyun #include <linux/errno.h>
13*4882a593Smuzhiyun #include <linux/smp.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include <asm/smp_plat.h>
16*4882a593Smuzhiyun #include <asm/cp15.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include <plat/platsmp.h>
19*4882a593Smuzhiyun 
/*
 * Prepare this CPU for its fake "offline" WFI loop: flush/disable caches
 * and drop out of SMP coherency so the core can sit in WFI safely.
 *
 * @actrl_mask: bit(s) to clear in the Auxiliary Control Register
 *              (typically the SMP bit) to take the CPU out of coherency.
 *
 * The instruction order matters: caches are cleaned/invalidated and a
 * barrier issued *before* coherency and the D-cache are turned off.
 */
static inline void versatile_immitation_enter_lowpower(unsigned int actrl_mask)
{
	unsigned int v;

	asm volatile(
		/* ICIALLU: invalidate the entire instruction cache (%1 == 0) */
		"mcr	p15, 0, %1, c7, c5, 0\n"
		/* CP15 DSB: ensure the invalidate completes before going on */
	"	mcr	p15, 0, %1, c7, c10, 4\n"
	/*
	 * Turn off coherency
	 */
	/* read-modify-write ACTLR: clear actrl_mask (leave SMP coherency) */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, %3\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	/* read-modify-write SCTLR: clear CR_C to disable the data cache */
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "r" (0), "Ir" (CR_C), "Ir" (actrl_mask)
	  : "cc");
}
40*4882a593Smuzhiyun 
/*
 * Undo versatile_immitation_enter_lowpower() after a wakeup: re-enable
 * the data cache, then rejoin SMP coherency.
 *
 * @actrl_mask: bit(s) to set again in the Auxiliary Control Register;
 *              must match the mask passed to the enter function.
 *
 * Note the reverse ordering versus entry: SCTLR.C first, ACTLR second.
 */
static inline void versatile_immitation_leave_lowpower(unsigned int actrl_mask)
{
	unsigned int v;

	asm volatile(
		/* read-modify-write SCTLR: set CR_C to re-enable the D-cache */
		"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	/* read-modify-write ACTLR: restore the coherency bit(s) */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (actrl_mask)
	  : "cc");
}
56*4882a593Smuzhiyun 
versatile_immitation_do_lowpower(unsigned int cpu,int * spurious)57*4882a593Smuzhiyun static inline void versatile_immitation_do_lowpower(unsigned int cpu, int *spurious)
58*4882a593Smuzhiyun {
59*4882a593Smuzhiyun 	/*
60*4882a593Smuzhiyun 	 * there is no power-control hardware on this platform, so all
61*4882a593Smuzhiyun 	 * we can do is put the core into WFI; this is safe as the calling
62*4882a593Smuzhiyun 	 * code will have already disabled interrupts.
63*4882a593Smuzhiyun 	 *
64*4882a593Smuzhiyun 	 * This code should not be used outside Versatile platforms.
65*4882a593Smuzhiyun 	 */
66*4882a593Smuzhiyun 	for (;;) {
67*4882a593Smuzhiyun 		wfi();
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 		if (versatile_cpu_release == cpu_logical_map(cpu)) {
70*4882a593Smuzhiyun 			/*
71*4882a593Smuzhiyun 			 * OK, proper wakeup, we're done
72*4882a593Smuzhiyun 			 */
73*4882a593Smuzhiyun 			break;
74*4882a593Smuzhiyun 		}
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 		/*
77*4882a593Smuzhiyun 		 * Getting here, means that we have come out of WFI without
78*4882a593Smuzhiyun 		 * having been woken up - this shouldn't happen
79*4882a593Smuzhiyun 		 *
80*4882a593Smuzhiyun 		 * Just note it happening - when we're woken, we can report
81*4882a593Smuzhiyun 		 * its occurrence.
82*4882a593Smuzhiyun 		 */
83*4882a593Smuzhiyun 		(*spurious)++;
84*4882a593Smuzhiyun 	}
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun /*
88*4882a593Smuzhiyun  * platform-specific code to shutdown a CPU.
89*4882a593Smuzhiyun  * This code supports immitation-style CPU hotplug for Versatile/Realview/
90*4882a593Smuzhiyun  * Versatile Express platforms that are unable to do real CPU hotplug.
91*4882a593Smuzhiyun  */
versatile_immitation_cpu_die(unsigned int cpu,unsigned int actrl_mask)92*4882a593Smuzhiyun void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask)
93*4882a593Smuzhiyun {
94*4882a593Smuzhiyun 	int spurious = 0;
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 	versatile_immitation_enter_lowpower(actrl_mask);
97*4882a593Smuzhiyun 	versatile_immitation_do_lowpower(cpu, &spurious);
98*4882a593Smuzhiyun 	versatile_immitation_leave_lowpower(actrl_mask);
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	if (spurious)
101*4882a593Smuzhiyun 		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
102*4882a593Smuzhiyun }
103