/*
 * Copyright 2003-2013 Broadcom Corporation.
 * All Rights Reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the Broadcom
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/cacheops.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/asmmacro.h>
#include <asm/addrspace.h>

#include <asm/netlogic/common.h>

#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
#include <asm/netlogic/xlp-hal/cpucontrol.h>

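/*
 * Uncached (CKSEG1) address of the SYS_CPU_NONCOHERENT_MODE register in
 * the node 0 SYS block; the wakeup code below adds a per-node offset
 * (node * 0x40000) to reach the same register on other nodes.
 */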
#define SYS_CPU_COHERENT_BASE	CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
			XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
			SYS_CPU_NONCOHERENT_MODE * 4

/* Enable XLP features and workarounds in the LSU */
.macro xlp_config_lsu
	li	t0, LSU_DEFEATURE
	mfcr	t1, t0

	lui	t2, 0x4080	/* Enable Unaligned Access, L2HPE */
	or	t1, t1, t2
	mtcr	t1, t0

	li	t0, ICU_DEFEATURE
	mfcr	t1, t0
	ori	t1, 0x1000	/* Enable Icache partitioning */
	mtcr	t1, t0

	li	t0, SCHED_DEFEATURE
	lui	t1, 0x0100	/* Disable BRU accepting ALU ops */
	mtcr	t1, t0
.endm

/*
 * Allow access to physical memory above 64GB by enabling the ELPA bit
 * in the PAGEGRAIN register. This is needed before going to C code,
 * since the SP can be in this region. Called from all HW threads.
 */
.macro xlp_early_mmu_init
	mfc0	t0, CP0_PAGEMASK, 1	/* PAGEMASK select 1 is PAGEGRAIN */
	li	t1, (1 << 29)		/* ELPA bit */
	or	t0, t1
	mtc0	t0, CP0_PAGEMASK, 1
.endm

/*
 * L1D cache has to be flushed before enabling threads in XLP.
 * On XLP8xx/XLP3xx, we do a low level flush using processor control
 * registers. On XLPII CPUs, usual cache instructions work.
 */
.macro	xlp_flush_l1_dcache
	mfc0	t0, CP0_PRID
	andi	t0, t0, PRID_IMP_MASK
	slt	t1, t0, 0x1200	/* t1 <- 1 for XLP8xx/XLP3xx (impl < 0x1200) */
	beqz	t1, 15f		/* XLPII: use regular cache instructions */
	nop

	/* XLP8xx low level cache flush */
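	/*
	 * Walk all 0x1000 indexes of the L1 D-cache; for each index,
	 * write way 0 and then way 1 through the LSU debug data/address
	 * registers, polling write_active until each write completes.
	 */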
	li	t0, LSU_DEBUG_DATA0
	li	t1, LSU_DEBUG_ADDR
	li	t2, 0		/* index */
	li	t3, 0x1000	/* loop count */
11:
	sll	v0, t2, 5
	mtcr	zero, t0
	ori	v1, v0, 0x3	/* way0 | write_enable | write_active */
	mtcr	v1, t1
12:
	mfcr	v1, t1
	andi	v1, 0x1		/* wait for write_active == 0 */
	bnez	v1, 12b
	nop
	mtcr	zero, t0
	ori	v1, v0, 0x7	/* way1 | write_enable | write_active */
	mtcr	v1, t1
13:
	mfcr	v1, t1
	andi	v1, 0x1		/* wait for write_active == 0 */
	bnez	v1, 13b
	nop
	addi	t2, 1
	bne	t3, t2, 11b
	nop
	b	17f
	nop

	/* XLPII CPUs: invalidate all 64K of L1 D-cache */
15:
	li	t0, 0x80000000
	li	t1, 0x80010000
16:	cache	Index_Writeback_Inv_D, 0(t0)
	addiu	t0, t0, 32
	bne	t0, t1, 16b
	nop
17:
.endm

/*
 * nlm_reset_entry will be copied to the reset entry point for
 * XLR and XLP. The XLP cores start here when they are woken up. This
 * is also the NMI entry point.
 *
 * We use scratch regs 6/7 to save k0/k1, and check for an NMI first.
 *
 * The data corresponding to reset/NMI is stored at the RESET_DATA_PHYS
 * location; it holds the thread mask (used when a core is woken up)
 * and the current NMI handler in case we reached here for an NMI.
 *
 * When a core or thread is newly woken up, it marks itself ready and
 * loops in a 'wait'. When the CPU really needs waking up, we send an NMI
 * IPI to it, with the NMI handler set to prom_boot_secondary_cpus.
 */
	.set	noreorder
	.set	noat
	.set	arch=xlr	/* for mfcr/mtcr, XLR is sufficient */

FEXPORT(nlm_reset_entry)
	dmtc0	k0, $22, 6
	dmtc0	k1, $22, 7
	mfc0	k0, CP0_STATUS
	li	k1, 0x80000	/* Status.NMI */
	and	k1, k0, k1
	beqz	k1, 1f		/* not an NMI, go to the real reset entry */
	nop
	li	k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
	ld	k0, BOOT_NMI_HANDLER(k1)
	jr	k0
	nop

1:	/* Entry point on core wakeup */
	mfc0	t0, CP0_PRID		/* processor ID */
	andi	t0, PRID_IMP_MASK
	li	t1, 0x1500		/* XLP 9xx */
	beq	t0, t1, 2f		/* does not need to set coherent */
	nop

	li	t1, 0x1300		/* XLP 5xx */
	beq	t0, t1, 2f		/* does not need to set coherent */
	nop

	/* clear the core's bit in the SYS noncoherent-mode register */
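	/*
	 * EBASE.CPUNum on XLP encodes thread (bits 1:0), core (bits 4:2)
	 * and node (bits 6:5); the shifts below extract node and core.
	 */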
	mfc0	t0, CP0_EBASE
	mfc0	t1, CP0_EBASE
	srl	t1, 5
	andi	t1, 0x3			/* t1 <- node */
	li	t2, 0x40000
	mul	t3, t2, t1		/* t3 = node * 0x40000 */
	srl	t0, t0, 2
	and	t0, t0, 0x7		/* t0 <- core */
	li	t1, 0x1
	sll	t0, t1, t0
	nor	t0, t0, zero		/* t0 <- ~(1 << core) */
	li	t2, SYS_CPU_COHERENT_BASE
	add	t2, t2, t3		/* t2 <- SYS offset for node */
	lw	t1, 0(t2)
	and	t1, t1, t0
	sw	t1, 0(t2)

	/* read back to ensure complete */
	lw	t1, 0(t2)
	sync

2:
	/* Configure the LSU on non-0 cores. */
	xlp_config_lsu
	/* FALL THROUGH */

/*
 * Wake up sibling threads from the initial thread in a core.
 */
EXPORT(nlm_boot_siblings)
	/* flush the core L1D before enabling threads */
	xlp_flush_l1_dcache
	/* save ra and sp, will be used later (only for boot cpu) */
	dmtc0	ra, $22, 6
	dmtc0	sp, $22, 7
	/* Enable hw threads by writing to MAP_THREADMODE of the core */
	li	t0, CKSEG1ADDR(RESET_DATA_PHYS)
	lw	t1, BOOT_THREAD_MODE(t0)	/* t1 <- thread mode */
	li	t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
	mfcr	t2, t0
	or	t2, t2, t1
	mtcr	t2, t0

	/*
	 * The new hardware thread starts at the next instruction.
	 * For all cases other than core 0 thread 0, we will
	 * jump to the secondary wait function.
	 *
	 * NOTE: All GPR contents are lost after the mtcr above!
	 */
	mfc0	v0, CP0_EBASE
	andi	v0, 0x3ff		/* v0 <- node/core */

	/*
	 * Errata: to avoid a potential livelock, set up IFU_BRUB_RESERVE
	 * when running 4 threads per core
	 */
	andi	v1, v0, 0x3             /* v1 <- thread id */
	bnez	v1, 2f
	nop

	/* thread 0 of each core. */
	li	t0, CKSEG1ADDR(RESET_DATA_PHYS)
	lw	t1, BOOT_THREAD_MODE(t0)        /* t1 <- thread mode */
	subu	t1, 0x3				/* 4-thread per core mode? */
	bnez	t1, 2f
	nop

	li	t0, IFU_BRUB_RESERVE
	li	t1, 0x55
	mtcr	t1, t0
	_ehb
2:
	beqz	v0, 4f		/* boot cpu (cpuid == 0)? */
	nop

	/* set up the status reg */
	move	t1, zero
#ifdef CONFIG_64BIT
	ori	t1, ST0_KX
#endif
	mtc0	t1, CP0_STATUS

	xlp_early_mmu_init

	/* mark CPU ready */
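	/*
	 * Each thread sets its word in the BOOT_CPU_READY array at
	 * RESET_DATA_PHYS, indexed by the node/core/thread id in v0,
	 * so the boot processor can later check which CPUs came up.
	 */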
	li	t3, CKSEG1ADDR(RESET_DATA_PHYS)
	ADDIU	t1, t3, BOOT_CPU_READY
	sll	v1, v0, 2
	PTR_ADDU t1, v1
	li	t2, 1
	sw	t2, 0(t1)
	/* Wait until NMI hits */
3:	wait
	b	3b
	nop

	/*
	 * For the boot CPU, we have to restore ra and sp and return;
	 * the rest of the registers will be restored by the caller.
	 */
4:
	dmfc0	ra, $22, 6
	dmfc0	sp, $22, 7
	jr	ra
	nop
EXPORT(nlm_reset_entry_end)
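/* nlm_reset_entry_end bounds the code that is copied to the reset vector */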

LEAF(nlm_init_boot_cpu)
#ifdef CONFIG_CPU_XLP
	xlp_config_lsu
	xlp_early_mmu_init
#endif
	jr	ra
	nop
END(nlm_init_boot_cpu)