xref: /OK3568_Linux_fs/kernel/arch/powerpc/kernel/misc_64.S (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun/* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun/*
3*4882a593Smuzhiyun * This file contains miscellaneous low-level functions.
4*4882a593Smuzhiyun *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
7*4882a593Smuzhiyun * and Paul Mackerras.
8*4882a593Smuzhiyun * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
9*4882a593Smuzhiyun * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
10*4882a593Smuzhiyun */
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun#include <linux/sys.h>
13*4882a593Smuzhiyun#include <asm/unistd.h>
14*4882a593Smuzhiyun#include <asm/errno.h>
15*4882a593Smuzhiyun#include <asm/processor.h>
16*4882a593Smuzhiyun#include <asm/page.h>
17*4882a593Smuzhiyun#include <asm/cache.h>
18*4882a593Smuzhiyun#include <asm/ppc_asm.h>
19*4882a593Smuzhiyun#include <asm/asm-offsets.h>
20*4882a593Smuzhiyun#include <asm/cputable.h>
21*4882a593Smuzhiyun#include <asm/thread_info.h>
22*4882a593Smuzhiyun#include <asm/kexec.h>
23*4882a593Smuzhiyun#include <asm/ptrace.h>
24*4882a593Smuzhiyun#include <asm/mmu.h>
25*4882a593Smuzhiyun#include <asm/export.h>
26*4882a593Smuzhiyun#include <asm/feature-fixups.h>
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun	.text
29*4882a593Smuzhiyun
/*
 * void call_do_softirq(void *sp);
 *
 * Run __do_softirq() on the dedicated softirq stack whose top is
 * passed in r3, then switch back to the caller's stack.
 */
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)	/* save LR in the caller's frame */
	/* stdu stores the old r1 at the top of the new stack (forming the
	 * stack back-chain) and updates r3 to point at that new frame */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3		/* switch to the softirq stack */
	bl	__do_softirq
	ld	r1,0(r1)	/* follow the back-chain to the original stack */
	ld	r0,16(r1)	/* reload the LR saved above */
	mtlr	r0
	blr
40*4882a593Smuzhiyun
/*
 * void call_do_irq(struct pt_regs *regs, void *sp);
 *
 * Run __do_irq() on the IRQ stack whose top is passed in r4.
 * r3 (regs) is left untouched and is passed straight through as
 * __do_irq()'s first argument.
 */
_GLOBAL(call_do_irq)
	mflr	r0
	std	r0,16(r1)	/* save LR in the caller's frame */
	/* stdu stores the old r1 on the IRQ stack (back-chain) and
	 * updates r4 to point at the new frame */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4		/* switch to the IRQ stack */
	bl	__do_irq
	ld	r1,0(r1)	/* back-chain: restore the original stack */
	ld	r0,16(r1)
	mtlr	r0
	blr
51*4882a593Smuzhiyun
/*
 * unsigned long long __bswapdi2(unsigned long long x)
 *
 * libgcc helper: byte-reverse the 64-bit value in r3, result in r3.
 * Each 32-bit half is byte-swapped with the classic rlwinm/rlwimi
 * rotate-and-insert sequence, then the two swapped halves are
 * recombined in the opposite order.
 */
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32		/* r8 = high 32 bits of input */
	rlwinm	r7,r3,8,0xffffffff	/* rotate low word left 8 */
	rlwimi	r7,r3,24,0,7		/* insert byte into bits 0-7 */
	rlwinm	r9,r8,8,0xffffffff	/* same swap for the high word */
	rlwimi	r7,r3,24,16,23		/* insert byte into bits 16-23 */
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32		/* swapped low word becomes new high */
	or	r3,r7,r9		/* combine: result in r3 */
	blr
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
/*
 * rmci_on: set bit (0x100 << 32) in HID4 — presumably the 970's
 * real-mode cache-inhibit control, matching the function name; used
 * only for early BootX debug (confirm against PPC970 HID4 docs).
 * The SLB is invalidated after changing HID4.
 */
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0	/* r3 = 0x100 << 32: the HID4 bit mask */
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3	/* set the bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia			/* flush SLB after the HID4 change */
	isync
	sync
	blr
81*4882a593Smuzhiyun
/*
 * rmci_off: clear the same HID4 bit (0x100 << 32) that rmci_on sets,
 * undoing the real-mode cache-inhibit debug mode.  SLB is flushed
 * after the HID4 update, mirroring rmci_on.
 */
_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0	/* r3 = 0x100 << 32: the HID4 bit mask */
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3	/* clear the bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia			/* flush SLB after the HID4 change */
	isync
	sync
	blr
96*4882a593Smuzhiyun#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun/*
101*4882a593Smuzhiyun * Do an IO access in real mode
102*4882a593Smuzhiyun */
/*
 * u8 real_readb(volatile u8 *addr);
 *
 * Read one byte with data translation (MSR_DR) off, temporarily
 * setting the (0x100 << 32) HID4 bit — see rmci_on — around the
 * access.  MSR and HID4 are restored before returning.
 */
_GLOBAL(real_readb)
	mfmsr	r7		/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR	/* r0 = MSR with DR cleared */
	sync
	mtmsrd	r0		/* data translation off */
	sync
	isync
	mfspr	r6,SPRN_HID4	/* save HID4 for restore below */
	/* rotate so the target bit lands in the low word, or in 0x100,
	 * rotate back: net effect is HID4 | (0x100 << 32) */
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia			/* flush SLB after HID4 change */
	isync
	lbz	r3,0(r3)	/* the actual real-mode byte load */
	sync
	mtspr	SPRN_HID4,r6	/* restore original HID4 */
	isync
	slbia
	isync
	mtmsrd	r7		/* restore MSR: translation back on */
	sync
	isync
	blr
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun	/*
132*4882a593Smuzhiyun * Do an IO access in real mode
133*4882a593Smuzhiyun */
/*
 * void real_writeb(u8 data, volatile u8 *addr);
 *
 * Write one byte (r3) to addr (r4) with data translation (MSR_DR)
 * off, temporarily setting the (0x100 << 32) HID4 bit around the
 * access — the mirror image of real_readb.
 */
_GLOBAL(real_writeb)
	mfmsr	r7		/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR	/* r0 = MSR with DR cleared */
	sync
	mtmsrd	r0		/* data translation off */
	sync
	isync
	mfspr	r6,SPRN_HID4	/* save HID4 for restore below */
	/* HID4 | (0x100 << 32) via rotate / or / rotate-back */
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia			/* flush SLB after HID4 change */
	isync
	stb	r3,0(r4)	/* the actual real-mode byte store */
	sync
	mtspr	SPRN_HID4,r6	/* restore original HID4 */
	isync
	slbia
	isync
	mtmsrd	r7		/* restore MSR: translation back on */
	sync
	isync
	blr
161*4882a593Smuzhiyun#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun#ifdef CONFIG_PPC_PASEMI
164*4882a593Smuzhiyun
/*
 * u8 real_205_readb(volatile u8 *addr);
 *
 * PA Semi (ISA 2.05) variant of real_readb: turn data translation
 * off and use the cache-inhibited load-byte-indexed instruction
 * (lbzcix) instead of the HID4 dance needed on the 970.
 */
_GLOBAL(real_205_readb)
	mfmsr	r7		/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR	/* r0 = MSR with DR cleared */
	sync
	mtmsrd	r0		/* data translation off */
	sync
	isync
	LBZCIX(R3,R0,R3)	/* cache-inhibited load: r3 = *(u8 *)r3 */
	isync
	mtmsrd	r7		/* restore MSR */
	sync
	isync
	blr
179*4882a593Smuzhiyun
/*
 * void real_205_writeb(u8 data, volatile u8 *addr);
 *
 * PA Semi (ISA 2.05) variant of real_writeb: data translation off,
 * cache-inhibited store-byte-indexed (stbcix) writes r3 to *r4.
 */
_GLOBAL(real_205_writeb)
	mfmsr	r7		/* save MSR */
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR	/* r0 = MSR with DR cleared */
	sync
	mtmsrd	r0		/* data translation off */
	sync
	isync
	STBCIX(R3,R0,R4)	/* cache-inhibited store: *(u8 *)r4 = r3 */
	isync
	mtmsrd	r7		/* restore MSR */
	sync
	isync
	blr
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun#endif /* CONFIG_PPC_PASEMI */
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
199*4882a593Smuzhiyun/*
200*4882a593Smuzhiyun * SCOM access functions for 970 (FX only for now)
201*4882a593Smuzhiyun *
202*4882a593Smuzhiyun * unsigned long scom970_read(unsigned int address);
203*4882a593Smuzhiyun * void scom970_write(unsigned int address, unsigned long value);
204*4882a593Smuzhiyun *
205*4882a593Smuzhiyun * The address passed in is the 24 bits register address. This code
206*4882a593Smuzhiyun * is 970 specific and will not check the status bits, so you should
207*4882a593Smuzhiyun * know what you are doing.
208*4882a593Smuzhiyun */
/*
 * unsigned long scom970_read(unsigned int address);
 *
 * Read a 970 SCOM register.  Runs with external interrupts disabled
 * for the duration of the SCOMC/SCOMD access.  Status bits are not
 * checked (see the header comment above).
 */
_GLOBAL(scom970_read)
	/* interrupts off: clear MSR_EE without touching other bits */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd,
	 * and finally or in RW bit
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000	/* RW = read */

	/* do the actual scom read: command, then data, then status */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD	/* result returned in r3 */
	isync
	mfspr	r0,SPRN_SCOMC	/* read back status (currently ignored) */
	isync

	/* XXX:	fixup result on some buggy 970's (ouch ! we lost a bit, bah
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun
/*
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * Write a 970 SCOM register.  Runs with external interrupts disabled
 * for the duration of the SCOMD/SCOMC access.  Status bits are not
 * checked (see the header comment above).
 */
_GLOBAL(scom970_write)
	/* interrupts off: clear MSR_EE without touching other bits */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd.
	 */

	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4      /* write data */
	isync
	mtspr	SPRN_SCOMC,r3      /* write command */
	isync
	/* read back status; use the r3 register macro, not a bare "3",
	 * for consistency and safety under %r-prefixed register builds */
	mfspr	r3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
267*4882a593Smuzhiyun#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun/* kexec_wait(phys_cpu)
270*4882a593Smuzhiyun *
271*4882a593Smuzhiyun * wait for the flag to change, indicating this kernel is going away but
272*4882a593Smuzhiyun * the slave code for the next one is at addresses 0 to 100.
273*4882a593Smuzhiyun *
274*4882a593Smuzhiyun * This is used by all slaves, even those that did not find a matching
275*4882a593Smuzhiyun * paca in the secondary startup code.
276*4882a593Smuzhiyun *
277*4882a593Smuzhiyun * Physical (hardware) cpu id should be in r3.
278*4882a593Smuzhiyun */
/* kexec_wait(phys_cpu)
 *
 * Spin with low thread priority until kexec_flag becomes non-zero
 * (set by kexec_sequence), then jump to the new kernel's slave entry
 * at absolute address 0x60.  Physical cpu id stays in r3 throughout.
 */
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5			/* r5 = runtime address of label 1 */
	addi	r5,r5,kexec_flag-1b	/* r5 = &kexec_flag (PC-relative) */

99:	HMT_LOW				/* lower SMT priority while spinning */
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b			/* spin until the flag is set */
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid				/* enter slave code at 0x60, big-endian */
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60			/* absolute branch to the slave entry */
#endif
#endif
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun/* this can be in text because we won't change it until we are
304*4882a593Smuzhiyun * running in real anyways
305*4882a593Smuzhiyun */
kexec_flag:	/* set to 1 by kexec_sequence to release the waiting slaves */
	.long	0
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun
310*4882a593Smuzhiyun#ifdef CONFIG_KEXEC_CORE
311*4882a593Smuzhiyun#ifdef CONFIG_PPC_BOOK3E
312*4882a593Smuzhiyun/*
313*4882a593Smuzhiyun * BOOK3E has no real MMU mode, so we have to setup the initial TLB
314*4882a593Smuzhiyun * for a core to identity map v:0 to p:0.  This current implementation
315*4882a593Smuzhiyun * assumes that 1G is enough for kexec.
316*4882a593Smuzhiyun */
/*
 * kexec_create_tlb: install an IPROT 1GB TLB1 entry mapping v:0 -> p:0
 * (identity map) in the last TLB1 slot, after invalidating all
 * non-protected entries.  Called in place of real_mode on Book3E,
 * which has no real (untranslated) mode.
 */
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1	/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r9) */

/* Set up a temp identity mapping v:0 to p:0 and return to it. */
	mtspr	SPRN_MAS0,r9

	/* valid, protected from invalidation, 1GB page size */
	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9

	/* effective address 0, coherency attribute if required */
	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS2_M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9

	/* physical address 0, supervisor read/write/execute */
	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9
	li	r9,0
	mtspr	SPRN_MAS7,r9		/* upper physical address bits = 0 */

	tlbwe
	isync
	blr
350*4882a593Smuzhiyun#endif
351*4882a593Smuzhiyun
352*4882a593Smuzhiyun/* kexec_smp_wait(void)
353*4882a593Smuzhiyun *
354*4882a593Smuzhiyun * call with interrupts off
355*4882a593Smuzhiyun * note: this is a terminal routine, it does not save lr
356*4882a593Smuzhiyun *
357*4882a593Smuzhiyun * get phys id from paca
358*4882a593Smuzhiyun * switch to real mode
359*4882a593Smuzhiyun * mark the paca as no longer used
360*4882a593Smuzhiyun * join other cpus in kexec_wait(phys_id)
361*4882a593Smuzhiyun */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)	/* phys cpu id, kept live for kexec_wait */
	bl	real_mode		/* switch MMU off (or identity-map on 3E) */

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)	/* mark this paca as no longer used */

	b	kexec_wait		/* terminal: joins the slave spin loop */
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun/*
372*4882a593Smuzhiyun * switch to real mode (turn mmu off)
373*4882a593Smuzhiyun * we use the early kernel trick that the hardware ignores bits
374*4882a593Smuzhiyun * 0 and 1 (big endian) of the effective address in real mode
375*4882a593Smuzhiyun *
376*4882a593Smuzhiyun * don't overwrite r3 here, it is live for kexec_wait above.
377*4882a593Smuzhiyun */
real_mode:	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E
	/* Create an identity mapping. */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9	/* r9 = current MSR with RI cleared */
	andc	r10,r12,r10	/* r10 = current MSR with DR/IR cleared */

	mtmsrd	r9,1		/* clear RI before taking the rfid */
	mtspr	SPRN_SRR1,r10	/* target MSR: translation off */
	mtspr	SPRN_SRR0,r11	/* target PC: our caller */
	rfid			/* "return" to caller with the MMU off */
#endif
394*4882a593Smuzhiyun#endif
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun/*
397*4882a593Smuzhiyun * kexec_sequence(newstack, start, image, control, clear_all(),
398*4882a593Smuzhiyun	          copy_with_mmu_off)
399*4882a593Smuzhiyun *
400*4882a593Smuzhiyun * does the grungy work with stack switching and real mode switches
401*4882a593Smuzhiyun * also does simple calls to other code
402*4882a593Smuzhiyun */
403*4882a593Smuzhiyun
/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
 *	          copy_with_mmu_off)
 *
 * Does the grungy work with stack switching and real mode switches,
 * copies the new kernel into place, releases the slave cpus, and
 * finally jumps into the new image.  Never returns.
 */
_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)	/* zero the saved-LR slot: terminate back-trace */

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* copy_with_mmu_off */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r3
	rlwinm	r3,r3,0,17,15		/* clear MSR_EE */
	mtmsrd	r3,1
#endif

	/* We need to turn the MMU off unless we are in hash mode
	 * under a hypervisor
	 */
	cmpdi	r26,0
	beq	1f
	bl	real_mode
1:
	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	kexec_copy_flush	/* (image) */

	/* turn off mmu now if not done earlier */
	cmpdi	r26,0
	bne	1f
	bl	real_mode

	/* copy  0x100 bytes starting at start to 0 */
1:	li	r3,0
	mr	r4,r30		/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5		/* LR = address of "1:" above (copy_and_flush
				 * returned here); used as PC-relative anchor */
	li	r6,1
	/* use the r5 register macro, not a bare "5": safe under
	 * %r-prefixed register name builds and consistent with the file */
	stw	r6,kexec_flag-1b(r5)

	cmpdi	r27,0
	beq	1f

	/* clear out hardware hash page table and tlb */
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27
#endif
	mtctr	r12
	bctrl				/* mmu_hash_ops.hpte_clear_all(void); */

/*
 *   kexec image calling is:
 *      the first 0x100 bytes of the entry point are copied to 0
 *
 *      all slaves branch to slave = 0x60 (absolute)
 *              slave(phys_cpu_id);
 *
 *      master goes to start = entry point
 *              start(phys_cpu_id, start, 0);
 *
 *
 *   a wrapper is needed to call existing kernels, here is an approximate
 *   description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
1:	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	r4	/* use the r4 register macro, not a bare "4" */
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
517*4882a593Smuzhiyun#endif /* CONFIG_KEXEC_CORE */
518