// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/nmi.h>
#include <asm/sn/arch.h>
#include <asm/sn/agent.h>

#if 0
#define NODE_NUM_CPUS(n)	CNODE_NUM_CPUS(n)
#else
#define NODE_NUM_CPUS(n)	CPUS_PER_NODE
#endif

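/*
 * SEND_NMI raises an NMI on the CPU in the given slice of the hub at
 * _nasid by writing 1 to that slice's PI_NMI register (PI_NMI_A plus a
 * per-slice offset) through an uncached remote hub store.
 */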
#define SEND_NMI(_nasid, _slice)	\
	REMOTE_HUB_S((_nasid),  (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)

typedef unsigned long machreg_t;

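/*
 * Serializes the NMI dump: the first CPU to take the NMI grabs this lock
 * and walks every node; the remaining CPUs spin here until the machine is
 * reset at the end of cont_nmi_dump().
 */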
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/*
 * Let's see what else we need to do here. Set up sp, gp?
 */
void nmi_dump(void)
{
	void cont_nmi_dump(void);

	cont_nmi_dump();
}

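/*
 * Register nmi_dump() as this slice's NMI vector in the per-CPU nmi_t
 * structure maintained by the IP27 PROM.  call_addr_c holds the bitwise
 * complement of call_addr, presumably so the PROM can sanity-check the
 * vector before jumping to it.  Typically invoked once per CPU during
 * early bring-up (for example from the IP27 per-CPU init code).
 */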
void install_cpu_nmi_handler(int slice)
{
	nmi_t *nmi_addr;

	nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
	if (nmi_addr->call_addr)
		return;
	nmi_addr->magic = NMI_MAGIC;
	nmi_addr->call_addr = (void *)nmi_dump;
	nmi_addr->call_addr_c =
		(void *)(~((unsigned long)(nmi_addr->call_addr)));
	nmi_addr->call_parm = 0;
}

/*
 * Copy the cpu registers which have been saved in the IP27prom format
 * into the eframe format for the node under consideration.
 */

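/*
 * The PROM stashes each CPU's registers at a fixed per-slice offset
 * (IP27_NMI_KREGS_OFFSET) in its node's memory; read them back through an
 * uncached address and print them in the usual MIPS register-dump layout.
 */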
void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
	struct reg_struct *nr;
	int		i;

	/* Get the pointer to the current cpu's register set. */
	nr = (struct reg_struct *)
		(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
		slice * IP27_NMI_KREGS_CPU_SIZE);

	pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			pr_emerg("$%2d   :", i);
		pr_cont(" %016lx", nr->gpr[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

	pr_emerg("Hi    : (value lost)\n");
	pr_emerg("Lo    : (value lost)\n");

	/*
	 * Saved cp0 registers
	 */
	pr_emerg("epc   : %016lx %pS\n", nr->epc, (void *)nr->epc);
	pr_emerg("%s\n", print_tainted());
	pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc);
	pr_emerg("ra    : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]);
	pr_emerg("Status: %08lx	      ", nr->sr);

	if (nr->sr & ST0_KX)
		pr_cont("KX ");
	if (nr->sr & ST0_SX)
		pr_cont("SX ");
	if (nr->sr & ST0_UX)
		pr_cont("UX ");

	switch (nr->sr & ST0_KSU) {
	case KSU_USER:
		pr_cont("USER ");
		break;
	case KSU_SUPERVISOR:
		pr_cont("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		pr_cont("KERNEL ");
		break;
	default:
		pr_cont("BAD_MODE ");
		break;
	}

	if (nr->sr & ST0_ERL)
		pr_cont("ERL ");
	if (nr->sr & ST0_EXL)
		pr_cont("EXL ");
	if (nr->sr & ST0_IE)
		pr_cont("IE ");
	pr_cont("\n");

	pr_emerg("Cause : %08lx\n", nr->cause);
	pr_emerg("PrId  : %08x\n", read_c0_prid());
	pr_emerg("BadVA : %016lx\n", nr->badva);
	pr_emerg("CErr  : %016lx\n", nr->cache_err);
	pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr);

	pr_emerg("\n");
}

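/*
 * Dump the hub PI interrupt masks for this slice together with the node's
 * pending-interrupt registers, so the interrupt state at NMI time shows up
 * in the log.
 */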
void nmi_dump_hub_irq(nasid_t nasid, int slice)
{
	u64 mask0, mask1, pend0, pend1;

	if (slice == 0) {				/* Slice A */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
	} else {					/* Slice B */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
	}

	pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
	pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);

	pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
	pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
	pr_emerg("\n\n");
}

/*
 * Dump the saved register state and hub interrupt state for every cpu
 * (slice) on this node.
 */
void nmi_node_eframe_save(nasid_t nasid)
{
	int slice;

	if (nasid == INVALID_NASID)
		return;

	/* Save the registers into eframe for each cpu */
	for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
		nmi_cpu_eframe_save(nasid, slice);
		nmi_dump_hub_irq(nasid, slice);
	}
}

/*
 * Save the nmi cpu registers for all cpus in the system.
 */
void
nmi_eframes_save(void)
{
	nasid_t nasid;

	for_each_online_node(nasid)
		nmi_node_eframe_save(nasid);
}

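/*
 * Entry point for the NMI dump proper.  Every NMIed CPU ends up here via
 * nmi_dump(); one CPU wins nmi_lock, waits for the others to check in,
 * dumps the saved state for the whole system and then triggers a reset
 * through the local hub's NI_PORT_RESET register.
 */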
void
cont_nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
	static atomic_t nmied_cpus = ATOMIC_INIT(0);

	atomic_inc(&nmied_cpus);
#endif
	/*
	 * Only allow 1 cpu to proceed
	 */
	arch_spin_lock(&nmi_lock);

#ifdef REAL_NMI_SIGNAL
	/*
	 * Wait up to 15 seconds for the other cpus to respond to the NMI.
	 * If a cpu has not responded after 10 sec, send it 1 additional NMI.
	 * This is for 2 reasons:
	 *	- sometimes an MMSC fails to NMI all cpus.
	 *	- on 512p SN0 systems, the MMSC will only send NMIs to
	 *	  half the cpus. Unfortunately, we don't know which cpus may be
	 *	  NMIed - it depends on how the site chooses to configure.
	 *
	 * Note: it has been measured that it takes the MMSC up to 2.3 secs to
	 * send NMIs to all cpus on a 256p system.
	 */
	for (i = 0; i < 1500; i++) {
		for_each_online_node(node)
			if (NODEPDA(node)->dump_count == 0)
				break;
		if (node == MAX_NUMNODES)
			break;
		if (i == 1000) {
			for_each_online_node(node)
				if (NODEPDA(node)->dump_count == 0) {
					cpu = cpumask_first(cpumask_of_node(node));
					for (n = 0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
						CPUMASK_SETB(nmied_cpus, cpu);
						/*
						 * cputonasid, cputoslice
						 * needs kernel cpuid
						 */
						SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
					}
				}

		}
		udelay(10000);
	}
#else
	while (atomic_read(&nmied_cpus) != num_online_cpus());
#endif

	/*
	 * Save the nmi cpu registers for all cpus in the eframe format.
	 */
	nmi_eframes_save();
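	/* All state dumped; reset via the local hub's NI_PORT_RESET (port + local reset). */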
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
}