/*
 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
 * reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the NetLogic
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/string.h>

#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>
#include <asm/netlogic/mips-extns.h>

#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/pic.h>
#include <asm/netlogic/xlp-hal/sys.h>

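/*
 * Take one core on a node out of reset: enable its clock on 8xx/3xx,
 * clear its non-coherent bit first on 9xx, deassert reset, and (except
 * on 9xx) poll until the core marks itself coherent.  Returns nonzero
 * on success, 0 if the coherency poll times out.
 */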
static int xlp_wakeup_core(uint64_t sysbase, int node, int core)
{
	uint32_t coremask, value;
	int count, resetreg;

	coremask = (1 << core);

	/* Enable CPU clock in case of 8xx/3xx */
	if (!cpu_is_xlpii()) {
		value = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
		value &= ~coremask;
		nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value);
	}

	/* On 9XX, mark coherent first */
	if (cpu_is_xlp9xx()) {
		value = nlm_read_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE);
		value &= ~coremask;
		nlm_write_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE, value);
	}

	/* Remove CPU Reset */
	resetreg = cpu_is_xlp9xx() ? SYS_9XX_CPU_RESET : SYS_CPU_RESET;
	value = nlm_read_sys_reg(sysbase, resetreg);
	value &= ~coremask;
	nlm_write_sys_reg(sysbase, resetreg, value);

	/* We are done on 9XX */
	if (cpu_is_xlp9xx())
		return 1;

	/* Poll for the CPU to mark itself coherent on other XLP types */
	count = 100000;
	do {
		value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
	} while ((value & coremask) != 0 && --count > 0);

	return count != 0;
}

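/*
 * Spin until every hardware thread of the core starting at 'cpu' has set
 * its BOOT_CPU_READY flag (the boot CPU itself is counted as ready).
 * Returns nonzero if all threads came up before the retry budget expired.
 */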
static int wait_for_cpus(int cpu, int bootcpu)
{
	volatile uint32_t *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
	int i, count, notready;

	count = 0x800000;
	do {
		notready = nlm_threads_per_core;
		for (i = 0; i < nlm_threads_per_core; i++)
			if (cpu_ready[cpu + i] || (cpu + i) == bootcpu)
				--notready;
	} while (notready != 0 && --count > 0);

	return count != 0;
}

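/*
 * Walk all nodes, determine the populated cores from the efuse/device
 * config registers, and bring each core requested in 'wakeup_mask' out
 * of reset, waiting for its hardware threads to report ready.
 */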
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
{
	struct nlm_soc_info *nodep;
	uint64_t syspcibase, fusebase;
	uint32_t syscoremask, mask, fusemask;
	int core, n, cpu, ncores;

	for (n = 0; n < NLM_NR_NODES; n++) {
		if (n != 0) {
			/* check if node exists and is online */
			if (cpu_is_xlp9xx()) {
				int b = xlp9xx_get_socbus(n);
				pr_info("Node %d SoC PCI bus %d.\n", n, b);
				if (b == 0)
					break;
			} else {
				syspcibase = nlm_get_sys_pcibase(n);
				if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
					break;
			}
			nlm_node_init(n);
		}

		/* read cores in reset from SYS */
		nodep = nlm_get_node(n);

		if (cpu_is_xlp9xx()) {
			fusebase = nlm_get_fuse_regbase(n);
			fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6);
			switch (read_c0_prid() & PRID_IMP_MASK) {
			case PRID_IMP_NETLOGIC_XLP5XX:
				mask = 0xff;
				break;
			case PRID_IMP_NETLOGIC_XLP9XX:
			default:
				mask = 0xfffff;
				break;
			}
		} else {
			fusemask = nlm_read_sys_reg(nodep->sysbase,
						SYS_EFUSE_DEVICE_CFG_STATUS0);
			switch (read_c0_prid() & PRID_IMP_MASK) {
			case PRID_IMP_NETLOGIC_XLP3XX:
				mask = 0xf;
				break;
			case PRID_IMP_NETLOGIC_XLP2XX:
				mask = 0x3;
				break;
			case PRID_IMP_NETLOGIC_XLP8XX:
			default:
				mask = 0xff;
				break;
			}
		}

		/*
		 * Fused-out cores are set in the fusemask, and the remaining
		 * cores are renumbered to the range 0 .. nactive-1
		 */
		syscoremask = (1 << hweight32(~fusemask & mask)) - 1;

		pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
		ncores = nlm_cores_per_node();
		for (core = 0; core < ncores; core++) {
			/* we will be on node 0 core 0 */
			if (n == 0 && core == 0)
				continue;

			/* see if the core exists */
			if ((syscoremask & (1 << core)) == 0)
				continue;

			/* see if at least the first hw thread is enabled */
			cpu = (n * ncores + core) * NLM_THREADS_PER_CORE;
			if (!cpumask_test_cpu(cpu, wakeup_mask))
				continue;

			/* wake up the core */
			if (!xlp_wakeup_core(nodep->sysbase, n, core))
				continue;

			/* core is up */
			nodep->coremask |= 1u << core;

			/* spin until the hw threads set their ready flags */
			if (!wait_for_cpus(cpu, 0))
				pr_err("Node %d : timeout core %d\n", n, core);
		}
	}
}

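/*
 * Wake up all secondary CPUs: first the sibling threads of the boot
 * core, then the remaining cores selected in nlm_cpumask.
 */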
void xlp_wakeup_secondary_cpus(void)
{
	/*
	 * In case of u-boot, the secondaries are in reset;
	 * first wake up the core 0 threads
	 */
	xlp_boot_core0_siblings();
	if (!wait_for_cpus(0, 0))
		pr_err("Node 0 : timeout core 0\n");

	/* now get other cores out of reset */
	xlp_enable_secondary_cores(&nlm_cpumask);
}