xref: /OK3568_Linux_fs/kernel/arch/powerpc/purgatory/trampoline_64.S (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun/* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun/*
3*4882a593Smuzhiyun * kexec trampoline
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Based on code taken from kexec-tools and kexec-lite.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Copyright (C) 2004 - 2005, Milton D Miller II, IBM Corporation
8*4882a593Smuzhiyun * Copyright (C) 2006, Mohan Kumar M, IBM Corporation
9*4882a593Smuzhiyun * Copyright (C) 2013, Anton Blanchard, IBM Corporation
10*4882a593Smuzhiyun */
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun#include <asm/asm-compat.h>
13*4882a593Smuzhiyun#include <asm/crashdump-ppc64.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun	.machine ppc64
/*
 * 256-byte alignment guarantees the fixed ABI offsets below
 * (0x5c, 0x60, 0x100) are valid relative to purgatory_start.
 */
16*4882a593Smuzhiyun	.balign 256
17*4882a593Smuzhiyun	.globl purgatory_start
/*
 * Primary (master) CPU enters here; it immediately branches past the
 * fixed-layout 0x100-byte region to the real code at 'master'.
 */
18*4882a593Smuzhiyunpurgatory_start:
19*4882a593Smuzhiyun	b	master
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun	/* ABI: possible run_at_load flag at 0x5c */
/*
 * 32-bit flag slot, patched by the kexec loader; the master path below
 * copies this value into the next kernel image at the same 0x5c offset.
 */
22*4882a593Smuzhiyun	.org purgatory_start + 0x5c
23*4882a593Smuzhiyun	.globl run_at_load
24*4882a593Smuzhiyunrun_at_load:
25*4882a593Smuzhiyun	.long 0
26*4882a593Smuzhiyun	.size run_at_load, . - run_at_load
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun	/* ABI: slaves start at 60 with r3=phys */
/*
 * Secondary CPUs enter at offset 0x60 and spin here ("b ." branches to
 * itself) until this region is overwritten with the next kernel's own
 * entry code, which then releases them.
 */
29*4882a593Smuzhiyun	.org purgatory_start + 0x60
30*4882a593Smuzhiyunslave:
31*4882a593Smuzhiyun	b .
32*4882a593Smuzhiyun	/* ABI: end of copied region */
/* Pad out to the full 0x100 bytes that get replaced (see note below). */
33*4882a593Smuzhiyun	.org purgatory_start + 0x100
34*4882a593Smuzhiyun	.size purgatory_start, . - purgatory_start
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun/*
37*4882a593Smuzhiyun * The above 0x100 bytes at purgatory_start are replaced with the
38*4882a593Smuzhiyun * code from the kernel (or next stage) by setup_purgatory().
39*4882a593Smuzhiyun */
40*4882a593Smuzhiyun
/*
 * master: main trampoline path, run on the boot CPU only.
 *
 * In:  r3 = this CPU's id (stashed in r17), r4 = physical run address
 *      (stashed in r15).
 * Uses r16 = device-tree address, r18 = runtime address of local label
 * 0 (base for PC-relative loads of the patched data slots below).
 * Does not return: ends with a branch (or rfid) into the next kernel.
 */
41*4882a593Smuzhiyunmaster:
/* "or rN,rN,rN" forms are SMT thread-priority hint nops on Power. */
42*4882a593Smuzhiyun	or	%r1,%r1,%r1	/* low priority to let other threads catchup */
43*4882a593Smuzhiyun	isync
44*4882a593Smuzhiyun	mr	%r17,%r3	/* save cpu id to r17 */
45*4882a593Smuzhiyun	mr	%r15,%r4	/* save physical address in reg15 */
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun	/* Work out where we're running */
/* bcl 20,31 is the link-register-sampling idiom (doesn't pollute the
 * branch predictor's link stack like a plain bl would). */
48*4882a593Smuzhiyun	bcl	20, 31, 0f
49*4882a593Smuzhiyun0:	mflr	%r18
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun	/*
52*4882a593Smuzhiyun	 * Copy BACKUP_SRC_SIZE bytes from BACKUP_SRC_START to
53*4882a593Smuzhiyun	 * backup_start 8 bytes at a time.
54*4882a593Smuzhiyun	 *
55*4882a593Smuzhiyun	 * Use r3 = dest, r4 = src, r5 = size, r6 = count
56*4882a593Smuzhiyun	 */
/* backup_start is a data slot patched at load time (see below);
 * zero means no crash-dump backup region was configured. */
57*4882a593Smuzhiyun	ld	%r3, (backup_start - 0b)(%r18)
58*4882a593Smuzhiyun	cmpdi	%cr0, %r3, 0
59*4882a593Smuzhiyun	beq	.Lskip_copy	/* skip if there is no backup region */
60*4882a593Smuzhiyun	lis	%r5, BACKUP_SRC_SIZE@h
61*4882a593Smuzhiyun	ori	%r5, %r5, BACKUP_SRC_SIZE@l
62*4882a593Smuzhiyun	cmpdi	%cr0, %r5, 0
63*4882a593Smuzhiyun	beq	.Lskip_copy	/* skip if copy size is zero */
64*4882a593Smuzhiyun	lis	%r4, BACKUP_SRC_START@h
65*4882a593Smuzhiyun	ori	%r4, %r4, BACKUP_SRC_START@l
66*4882a593Smuzhiyun	li	%r6, 0
/* 8-bytes-at-a-time copy; assumes BACKUP_SRC_SIZE is a multiple of 8
 * (loop exits on r6 >= r5) — enforced by the loader, not checked here. */
67*4882a593Smuzhiyun.Lcopy_loop:
68*4882a593Smuzhiyun	ldx	%r0, %r6, %r4
69*4882a593Smuzhiyun	stdx	%r0, %r6, %r3
70*4882a593Smuzhiyun	addi	%r6, %r6, 8
71*4882a593Smuzhiyun	cmpld	%cr0, %r6, %r5
72*4882a593Smuzhiyun	blt	.Lcopy_loop
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun.Lskip_copy:
/* "or r3,r3,r3" = medium/normal SMT priority hint. */
75*4882a593Smuzhiyun	or	%r3,%r3,%r3	/* ok now to high priority, lets boot */
/* Spin 0x10000 CTR decrements so slave threads reach their spin loop
 * before the 0x0-0x100 region is overwritten by the new kernel. */
76*4882a593Smuzhiyun	lis	%r6,0x1
77*4882a593Smuzhiyun	mtctr	%r6		/* delay a bit for slaves to catch up */
78*4882a593Smuzhiyun	bdnz	.		/* before we overwrite 0-100 again */
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun	/* load device-tree address */
81*4882a593Smuzhiyun	ld	%r3, (dt_offset - 0b)(%r18)
82*4882a593Smuzhiyun	mr	%r16,%r3	/* save dt address in reg16 */
/* FDT header: offset 20 is the big-endian 'version' field. */
83*4882a593Smuzhiyun	li	%r4,20
84*4882a593Smuzhiyun	LWZX_BE	%r6,%r3,%r4	/* fetch __be32 version number at byte 20 */
85*4882a593Smuzhiyun	cmpwi	%cr0,%r6,2	/* v2 or later? */
86*4882a593Smuzhiyun	blt	1f
/* FDT >= v2 has boot_cpuid_phys at offset 28; record our CPU there. */
87*4882a593Smuzhiyun	li	%r4,28
88*4882a593Smuzhiyun	STWX_BE	%r17,%r3,%r4	/* Store my cpu as __be32 at byte 28 */
89*4882a593Smuzhiyun1:
90*4882a593Smuzhiyun	/* Load opal base and entry values in r8 & r9 respectively */
91*4882a593Smuzhiyun	ld	%r8,(opal_base - 0b)(%r18)
92*4882a593Smuzhiyun	ld	%r9,(opal_entry - 0b)(%r18)
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun	/* load the kernel address */
95*4882a593Smuzhiyun	ld	%r4,(kernel - 0b)(%r18)
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun	/* load the run_at_load flag */
98*4882a593Smuzhiyun	/* possibly patched by kexec */
99*4882a593Smuzhiyun	ld	%r6,(run_at_load - 0b)(%r18)
100*4882a593Smuzhiyun	/* and patch it into the kernel */
/* Next kernel image keeps the same 0x5c flag slot (same fixed layout
 * as purgatory_start above). */
101*4882a593Smuzhiyun	stw	%r6,(0x5c)(%r4)
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun	mr	%r3,%r16	/* restore dt address */
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun	li	%r5,0		/* r5 will be 0 for kernel */
106*4882a593Smuzhiyun
/* The next kernel must be entered big-endian: if MSR[LE] is currently
 * set we cannot just branch — use rfid to switch endianness atomically
 * with the jump. */
107*4882a593Smuzhiyun	mfmsr	%r11
108*4882a593Smuzhiyun	andi.	%r10,%r11,1	/* test MSR_LE */
109*4882a593Smuzhiyun	bne	.Little_endian
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun	mtctr	%r4		/* prepare branch to */
112*4882a593Smuzhiyun	bctr			/* start kernel */
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun.Little_endian:
115*4882a593Smuzhiyun	mtsrr0	%r4		/* prepare branch to */
116*4882a593Smuzhiyun
/* MSR_LE is bit 0; clrrdi clears it so rfid lands big-endian. */
117*4882a593Smuzhiyun	clrrdi	%r11,%r11,1	/* clear MSR_LE */
118*4882a593Smuzhiyun	mtsrr1	%r11
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun	rfid			/* update MSR and start kernel */
121*4882a593Smuzhiyun
/*
 * Parameter slots patched into this image at load time by
 * setup_purgatory() (kexec_file) before purgatory runs.  All are
 * 8-byte aligned so the 'ld' instructions above can read them.
 */
122*4882a593Smuzhiyun	.balign 8
123*4882a593Smuzhiyun	.globl kernel
/* Physical address of the next kernel image to enter. */
124*4882a593Smuzhiyunkernel:
125*4882a593Smuzhiyun	.8byte  0x0
126*4882a593Smuzhiyun	.size kernel, . - kernel
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun	.balign 8
129*4882a593Smuzhiyun	.globl dt_offset
/* Address of the flattened device tree passed to the next kernel (r3). */
130*4882a593Smuzhiyundt_offset:
131*4882a593Smuzhiyun	.8byte  0x0
132*4882a593Smuzhiyun	.size dt_offset, . - dt_offset
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun	.balign 8
135*4882a593Smuzhiyun	.globl backup_start
/* Destination for the crash-dump backup copy; 0 = no backup region. */
136*4882a593Smuzhiyunbackup_start:
137*4882a593Smuzhiyun	.8byte  0x0
138*4882a593Smuzhiyun	.size backup_start, . - backup_start
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun	.balign 8
141*4882a593Smuzhiyun	.globl opal_base
/* OPAL firmware base, forwarded to the next kernel in r8 (PowerNV). */
142*4882a593Smuzhiyunopal_base:
143*4882a593Smuzhiyun	.8byte  0x0
144*4882a593Smuzhiyun	.size opal_base, . - opal_base
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun	.balign 8
147*4882a593Smuzhiyun	.globl opal_entry
/* OPAL firmware entry point, forwarded in r9 (PowerNV). */
148*4882a593Smuzhiyunopal_entry:
149*4882a593Smuzhiyun	.8byte  0x0
150*4882a593Smuzhiyun	.size opal_entry, . - opal_entry
151*4882a593Smuzhiyun
/*
 * Integrity-check data filled in by the kexec_file loader:
 * a 32-byte SHA-256 digest plus up to 16 (start, length) region pairs
 * (8 * 2 * 16 bytes).  NOTE(review): presumably consumed by
 * purgatory's digest-verification code, which is not visible in this
 * file — confirm against the purgatory C sources.
 */
152*4882a593Smuzhiyun	.data
153*4882a593Smuzhiyun	.balign 8
154*4882a593Smuzhiyun.globl purgatory_sha256_digest
155*4882a593Smuzhiyunpurgatory_sha256_digest:
156*4882a593Smuzhiyun	.skip	32
157*4882a593Smuzhiyun	.size purgatory_sha256_digest, . - purgatory_sha256_digest
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun	.balign 8
160*4882a593Smuzhiyun.globl purgatory_sha_regions
161*4882a593Smuzhiyunpurgatory_sha_regions:
162*4882a593Smuzhiyun	.skip	8 * 2 * 16
163*4882a593Smuzhiyun	.size purgatory_sha_regions, . - purgatory_sha_regions
164