/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * Refer to Documentation/arm/cluster-pm-race-avoidance.rst
 * for details of the synchronisation algorithms used here.
 */

#include <linux/linkage.h>
#include <asm/mcpm.h>
#include <asm/assembler.h>

#include "vlock.h"

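@ The code below indexes the per-CPU sync bytes directly off the cluster
@ base, which is only valid if the cpus[] array sits at offset 0 of
@ struct mcpm_sync_struct; fail the build otherwise.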
.if MCPM_SYNC_CLUSTER_CPUS
.error "cpus must be the first member of struct mcpm_sync_struct"
.endif

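@ Debug helper: with CONFIG_DEBUG_LL and DEBUG both defined, print
@ "CPU<cpu> cluster<cluster>: <string>" on the low-level debug console
@ (cpu in r9, cluster in r10); otherwise this expands to nothing.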
	.macro	pr_dbg	string
#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
	b	1901f
1902:	.asciz	"CPU"
1903:	.asciz	" cluster"
1904:	.asciz	": \string"
	.align
1901:	adr	r0, 1902b
	bl	printascii
	mov	r0, r9
	bl	printhex2
	adr	r0, 1903b
	bl	printascii
	mov	r0, r10
	bl	printhex2
	adr	r0, 1904b
	bl	printascii
#endif
	.endm

	.arm
	.align

ENTRY(mcpm_entry_point)

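	@ CPUs are released into this entry point in ARM state with the MMU
	@ off; switch to BE8 data endianness and/or Thumb state here if that
	@ is how the kernel was built.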
 ARM_BE8(setend        be)
 THUMB(	badr	r12, 1f		)
 THUMB(	bx	r12		)
 THUMB(	.thumb			)
1:
	mrc	p15, 0, r0, c0, c0, 5		@ MPIDR
	ubfx	r9, r0, #0, #8			@ r9 = cpu
	ubfx	r10, r0, #8, #8			@ r10 = cluster
	mov	r3, #MAX_CPUS_PER_CLUSTER
	mla	r4, r3, r10, r9			@ r4 = canonical CPU index
	cmp	r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
	blo	2f

	/* We didn't expect this CPU.  Try to cheaply make it quiet. */
1:	wfi
	wfe
	b	1b

2:	pr_dbg	"kernel mcpm_entry_point\n"

	/*
	 * MMU is off so we need to get to various variables in a
	 * position independent way.
	 */
	adr	r5, 3f
	ldmia	r5, {r0, r6, r7, r8, r11}
	add	r0, r5, r0			@ r0 = mcpm_entry_early_pokes
	add	r6, r5, r6			@ r6 = mcpm_entry_vectors
	ldr	r7, [r5, r7]			@ r7 = mcpm_power_up_setup_phys
	add	r8, r5, r8			@ r8 = mcpm_sync
	add	r11, r5, r11			@ r11 = first_man_locks

	@ Perform an early poke, if any
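	@ Each poke slot is an 8-byte {address, value} pair indexed by the
	@ canonical CPU number; a zero address means there is nothing to poke.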
	add	r0, r0, r4, lsl #3
	ldmia	r0, {r0, r1}
	teq	r0, #0
	strne	r1, [r0]

	mov	r0, #MCPM_SYNC_CLUSTER_SIZE
	mla	r8, r0, r10, r8			@ r8 = sync cluster base

	@ Signal that this CPU is coming UP:
	mov	r0, #CPU_COMING_UP
	mov	r5, #MCPM_SYNC_CPU_SIZE
	mla	r5, r9, r5, r8			@ r5 = sync cpu address
	strb	r0, [r5]

	@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
	@ state, because there is at least one active CPU (this CPU).

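	@ Try to become the cluster's "first man": the vlock elects the one
	@ inbound CPU that will perform the cluster-level setup below.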
	mov	r0, #VLOCK_SIZE
	mla	r11, r0, r10, r11		@ r11 = cluster first man lock
	mov	r0, r11
	mov	r1, r9				@ cpu
	bl	vlock_trylock			@ implies DMB

	cmp	r0, #0				@ failed to get the lock?
	bne	mcpm_setup_wait			@ wait for cluster setup if so

	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_UP			@ cluster already up?
	bne	mcpm_setup			@ if not, set up the cluster

	@ Otherwise, release the first man lock and skip setup:
	mov	r0, r11
	bl	vlock_unlock
	b	mcpm_setup_complete

mcpm_setup:
	@ Control dependency implies strb not observable before previous ldrb.

	@ Signal that the cluster is being brought up:
	mov	r0, #INBOUND_COMING_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
	dmb

	@ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
	@ point onwards will observe INBOUND_COMING_UP and abort.

	@ Wait for any previously-pending cluster teardown operations to abort
	@ or complete:
mcpm_teardown_wait:
	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_GOING_DOWN
	bne	first_man_setup
	wfe
	b	mcpm_teardown_wait

first_man_setup:
	dmb

	@ If the outbound gave up before teardown started, skip cluster setup:

	cmp	r0, #CLUSTER_UP
	beq	mcpm_setup_leave

	@ power_up_setup is now responsible for setting up the cluster:

	cmp	r7, #0
	mov	r0, #1		@ second (cluster) affinity level
	blxne	r7		@ Call power_up_setup if defined
	dmb

	mov	r0, #CLUSTER_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	dmb

mcpm_setup_leave:
	@ Leave the cluster setup critical section:

	mov	r0, #INBOUND_NOT_COMING_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
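	@ Make the store above visible before waking any CPUs parked in WFE
	@ on these sync fields: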
	dsb	st
	sev

	mov	r0, r11
	bl	vlock_unlock	@ implies DMB
	b	mcpm_setup_complete

	@ In the contended case, non-first men wait here for cluster setup
	@ to complete:
mcpm_setup_wait:
	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_UP
	wfene
	bne	mcpm_setup_wait
	dmb

mcpm_setup_complete:
	@ If a platform-specific CPU setup hook is needed, it is
	@ called from here.

	cmp	r7, #0
	mov	r0, #0		@ first (CPU) affinity level
	blxne	r7		@ Call power_up_setup if defined
	dmb

	@ Mark the CPU as up:

	mov	r0, #CPU_UP
	strb	r0, [r5]

	@ Observability order of CPU_UP and opening of the gate does not matter.

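	@ Gate: wait, mostly in WFE, until a non-NULL entry vector has been
	@ published for this CPU, then branch to it.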
mcpm_entry_gated:
	ldr	r5, [r6, r4, lsl #2]		@ r5 = CPU entry vector
	cmp	r5, #0
	wfeeq
	beq	mcpm_entry_gated
	dmb

	pr_dbg	"released\n"
	bx	r5

	.align	2

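	@ Offsets from this table to the objects referenced above; adding them
	@ to its run-time address (r5) keeps the code position independent.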
3:	.word	mcpm_entry_early_pokes - .
	.word	mcpm_entry_vectors - 3b
	.word	mcpm_power_up_setup_phys - 3b
	.word	mcpm_sync - 3b
	.word	first_man_locks - 3b

ENDPROC(mcpm_entry_point)

	.bss

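	@ The first man vlocks are accessed with the cache disabled, so pad
	@ them out to cache writeback granule boundaries to keep them from
	@ sharing cache lines with anything else.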
	.align	CACHE_WRITEBACK_ORDER
	.type	first_man_locks, #object
first_man_locks:
	.space	VLOCK_SIZE * MAX_NR_CLUSTERS
	.align	CACHE_WRITEBACK_ORDER

	.type	mcpm_entry_vectors, #object
ENTRY(mcpm_entry_vectors)
	.space	4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER

	.type	mcpm_entry_early_pokes, #object
ENTRY(mcpm_entry_early_pokes)
	.space	8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER

	.type	mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
	.space  4		@ set by mcpm_sync_init()