// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cell Broadband Engine Performance Monitor
 *
 * (C) Copyright IBM Corporation 2001,2006
 *
 * Author:
 *    David Erb (djerb@us.ibm.com)
 *    Kevin Corry (kevcorry@us.ibm.com)
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/export.h>
#include <asm/io.h>
#include <asm/irq_regs.h>
#include <asm/machdep.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#include <asm/spu.h>
#include <asm/cell-regs.h>

#include "interrupt.h"

/*
 * When writing to write-only mmio addresses, save a shadow copy. All of the
 * registers are 32-bit, but stored in the upper-half of a 64-bit field in
 * pmd_regs.
 */

#define WRITE_WO_MMIO(reg, x)					\
	do {							\
		u32 _x = (x);					\
		struct cbe_pmd_regs __iomem *pmd_regs;		\
		struct cbe_pmd_shadow_regs *shadow_regs;	\
		pmd_regs = cbe_get_cpu_pmd_regs(cpu);		\
		shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);	\
		out_be64(&(pmd_regs->reg), (((u64)_x) << 32));	\
		shadow_regs->reg = _x;				\
	} while (0)

#define READ_SHADOW_REG(val, reg)				\
	do {							\
		struct cbe_pmd_shadow_regs *shadow_regs;	\
		shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);	\
		(val) = shadow_regs->reg;			\
	} while (0)

#define READ_MMIO_UPPER32(val, reg)				\
	do {							\
		struct cbe_pmd_regs __iomem *pmd_regs;		\
		pmd_regs = cbe_get_cpu_pmd_regs(cpu);		\
		(val) = (u32)(in_be64(&pmd_regs->reg) >> 32);	\
	} while (0)
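
/*
 * Illustration (hypothetical caller, not part of the driver): all three
 * macros expect a local variable named 'cpu' to be in scope at the point
 * of expansion. A write-then-read of group_control would look like:
 *
 *	u32 cpu = the_target_cpu;
 *	u32 val;
 *
 *	WRITE_WO_MMIO(group_control, 0x80000000);
 *	READ_SHADOW_REG(val, group_control);
 *
 * after which val holds 0x80000000 without touching the write-only MMIO
 * register again.
 */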

/*
 * Physical counter registers.
 * Each physical counter can act as one 32-bit counter or two 16-bit counters.
 */

u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
{
	u32 val_in_latch, val = 0;

	if (phys_ctr < NR_PHYS_CTRS) {
		READ_SHADOW_REG(val_in_latch, counter_value_in_latch);

		/* Read the latch or the actual counter, whichever is newer. */
		if (val_in_latch & (1 << phys_ctr)) {
			READ_SHADOW_REG(val, pm_ctr[phys_ctr]);
		} else {
			READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]);
		}
	}

	return val;
}
EXPORT_SYMBOL_GPL(cbe_read_phys_ctr);

void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
{
	struct cbe_pmd_shadow_regs *shadow_regs;
	u32 pm_ctrl;

	if (phys_ctr < NR_PHYS_CTRS) {
		/* Writing to a counter only writes to a hardware latch.
		 * The new value is not propagated to the actual counter
		 * until the performance monitor is enabled.
		 */
		WRITE_WO_MMIO(pm_ctr[phys_ctr], val);

		pm_ctrl = cbe_read_pm(cpu, pm_control);
		if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) {
			/* The counters are already active, so we need to
			 * rewrite the pm_control register to "re-enable"
			 * the PMU.
			 */
			cbe_write_pm(cpu, pm_control, pm_ctrl);
		} else {
			shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
			shadow_regs->counter_value_in_latch |= (1 << phys_ctr);
		}
	}
}
EXPORT_SYMBOL_GPL(cbe_write_phys_ctr);

/*
 * "Logical" counter registers.
 * These will read/write 16-bits or 32-bits depending on the
 * current size of the counter. Counters 4 - 7 are always 16-bit.
 */
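
/*
 * Mapping sketch (assuming NR_CTRS == 2 * NR_PHYS_CTRS): logical counter
 * n is backed by physical counter n & (NR_PHYS_CTRS - 1). In 16-bit mode
 * the low logical counter (n < NR_PHYS_CTRS) lives in the upper half of
 * the physical register and its high sibling (n >= NR_PHYS_CTRS) in the
 * lower half.
 */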

u32 cbe_read_ctr(u32 cpu, u32 ctr)
{
	u32 val;
	u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);

	val = cbe_read_phys_ctr(cpu, phys_ctr);

	if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
		val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);

	return val;
}
EXPORT_SYMBOL_GPL(cbe_read_ctr);

void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
{
	u32 phys_ctr;
	u32 phys_val;

	phys_ctr = ctr & (NR_PHYS_CTRS - 1);

	if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
		phys_val = cbe_read_phys_ctr(cpu, phys_ctr);

		if (ctr < NR_PHYS_CTRS)
			val = (val << 16) | (phys_val & 0xffff);
		else
			val = (val & 0xffff) | (phys_val & 0xffff0000);
	}

	cbe_write_phys_ctr(cpu, phys_ctr, val);
}
EXPORT_SYMBOL_GPL(cbe_write_ctr);
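
/*
 * Worked example (illustration only, assuming NR_PHYS_CTRS == 4): with
 * physical counter 1 in 16-bit mode and currently holding 0xaaaabbbb,
 *
 *	cbe_write_ctr(cpu, 1, 0x1234);
 *
 * splices the upper half, giving 0x1234bbbb, while
 *
 *	cbe_write_ctr(cpu, 5, 0x1234);
 *
 * targets the same physical counter's lower half, giving 0xaaaa1234.
 */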

/*
 * Counter-control registers.
 * Each "logical" counter has a corresponding control register.
 */

u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
{
	u32 pm07_control = 0;

	if (ctr < NR_CTRS)
		READ_SHADOW_REG(pm07_control, pm07_control[ctr]);

	return pm07_control;
}
EXPORT_SYMBOL_GPL(cbe_read_pm07_control);

void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
{
	if (ctr < NR_CTRS)
		WRITE_WO_MMIO(pm07_control[ctr], val);
}
EXPORT_SYMBOL_GPL(cbe_write_pm07_control);

/*
 * Other PMU control registers. Most of these are write-only.
 */

u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
{
	u32 val = 0;

	switch (reg) {
	case group_control:
		READ_SHADOW_REG(val, group_control);
		break;

	case debug_bus_control:
		READ_SHADOW_REG(val, debug_bus_control);
		break;

	case trace_address:
		READ_MMIO_UPPER32(val, trace_address);
		break;

	case ext_tr_timer:
		READ_SHADOW_REG(val, ext_tr_timer);
		break;

	case pm_status:
		READ_MMIO_UPPER32(val, pm_status);
		break;

	case pm_control:
		READ_SHADOW_REG(val, pm_control);
		break;

	case pm_interval:
		READ_MMIO_UPPER32(val, pm_interval);
		break;

	case pm_start_stop:
		READ_SHADOW_REG(val, pm_start_stop);
		break;
	}

	return val;
}
EXPORT_SYMBOL_GPL(cbe_read_pm);

void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
{
	switch (reg) {
	case group_control:
		WRITE_WO_MMIO(group_control, val);
		break;

	case debug_bus_control:
		WRITE_WO_MMIO(debug_bus_control, val);
		break;

	case trace_address:
		WRITE_WO_MMIO(trace_address, val);
		break;

	case ext_tr_timer:
		WRITE_WO_MMIO(ext_tr_timer, val);
		break;

	case pm_status:
		WRITE_WO_MMIO(pm_status, val);
		break;

	case pm_control:
		WRITE_WO_MMIO(pm_control, val);
		break;

	case pm_interval:
		WRITE_WO_MMIO(pm_interval, val);
		break;

	case pm_start_stop:
		WRITE_WO_MMIO(pm_start_stop, val);
		break;
	}
}
EXPORT_SYMBOL_GPL(cbe_write_pm);

/*
 * Get/set the size of a physical counter to either 16 or 32 bits.
 */

u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
{
	u32 pm_ctrl, size = 0;

	if (phys_ctr < NR_PHYS_CTRS) {
		pm_ctrl = cbe_read_pm(cpu, pm_control);
		size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
	}

	return size;
}
EXPORT_SYMBOL_GPL(cbe_get_ctr_size);

void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
{
	u32 pm_ctrl;

	if (phys_ctr < NR_PHYS_CTRS) {
		pm_ctrl = cbe_read_pm(cpu, pm_control);
		switch (ctr_size) {
		case 16:
			pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
			break;

		case 32:
			pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
			break;
		}
		cbe_write_pm(cpu, pm_control, pm_ctrl);
	}
}
EXPORT_SYMBOL_GPL(cbe_set_ctr_size);
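
/*
 * Illustration (hypothetical caller): splitting physical counter 0 into
 * two 16-bit halves and then addressing them as logical counters 0 and
 * NR_PHYS_CTRS:
 *
 *	cbe_set_ctr_size(cpu, 0, 16);
 *	cbe_write_ctr(cpu, 0, 0xbeef);
 *	cbe_write_ctr(cpu, NR_PHYS_CTRS, 0xcafe);
 */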

/*
 * Enable/disable the entire performance monitoring unit.
 * When we enable the PMU, all pending writes to counters get committed.
 */

void cbe_enable_pm(u32 cpu)
{
	struct cbe_pmd_shadow_regs *shadow_regs;
	u32 pm_ctrl;

	shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
	shadow_regs->counter_value_in_latch = 0;

	pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
	cbe_write_pm(cpu, pm_control, pm_ctrl);
}
EXPORT_SYMBOL_GPL(cbe_enable_pm);

void cbe_disable_pm(u32 cpu)
{
	u32 pm_ctrl;

	pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
	cbe_write_pm(cpu, pm_control, pm_ctrl);
}
EXPORT_SYMBOL_GPL(cbe_disable_pm);
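
/*
 * Typical programming sequence (sketch, not taken from this file):
 * counter writes made while the PMU is disabled only reach the hardware
 * latch, so a caller stages them first and commits with cbe_enable_pm():
 *
 *	cbe_disable_pm(cpu);
 *	cbe_write_ctr(cpu, ctr, start_value);
 *	cbe_enable_pm(cpu);
 */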

/*
 * Reading from the trace_buffer.
 * The trace buffer is two 64-bit registers. Reading from
 * the second half automatically increments the trace_address.
 */

void cbe_read_trace_buffer(u32 cpu, u64 *buf)
{
	struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);

	*buf++ = in_be64(&pmd_regs->trace_buffer_0_63);
	*buf++ = in_be64(&pmd_regs->trace_buffer_64_127);
}
EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
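
/*
 * Sketch of a hypothetical consumer: each call fills buf[0..1] with one
 * 128-bit trace entry, and the read of trace_buffer_64_127 advances
 * trace_address, so repeated calls walk the buffer:
 *
 *	u64 entry[2];
 *
 *	while (entries_remain)
 *		cbe_read_trace_buffer(cpu, entry);
 *
 * where entries_remain is whatever termination test the caller uses.
 */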

/*
 * Enabling/disabling interrupts for the entire performance monitoring unit.
 */

u32 cbe_get_and_clear_pm_interrupts(u32 cpu)
{
	/* Reading pm_status clears the interrupt bits. */
	return cbe_read_pm(cpu, pm_status);
}
EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts);

void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
{
	/* Set which node and thread will handle the next interrupt. */
	iic_set_interrupt_routing(cpu, thread, 0);

	/* Enable the interrupt bits in the pm_status register. */
	if (mask)
		cbe_write_pm(cpu, pm_status, mask);
}
EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);

void cbe_disable_pm_interrupts(u32 cpu)
{
	cbe_get_and_clear_pm_interrupts(cpu);
	cbe_write_pm(cpu, pm_status, 0);
}
EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
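
/*
 * Illustration (hypothetical caller; CBE_PM_CTR_OVERFLOW_INTR() is
 * assumed to come from the cell PMU header): route counter-overflow
 * interrupts for counter 'ctr' to a given cpu and thread, and tear the
 * routing down again afterwards:
 *
 *	cbe_enable_pm_interrupts(cpu, thread, CBE_PM_CTR_OVERFLOW_INTR(ctr));
 *	...
 *	cbe_disable_pm_interrupts(cpu);
 */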

static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
{
	perf_irq(get_irq_regs());
	return IRQ_HANDLED;
}

static int __init cbe_init_pm_irq(void)
{
	unsigned int irq;
	int rc, node;

	for_each_online_node(node) {
		irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
					       (node << IIC_IRQ_NODE_SHIFT));
		if (!irq) {
			printk(KERN_ERR "ERROR: Unable to allocate irq for node %d\n",
			       node);
			return -EINVAL;
		}

		rc = request_irq(irq, cbe_pm_irq, 0, "cbe-pmu-0", NULL);
		if (rc) {
			printk(KERN_ERR "ERROR: Request for irq on node %d failed\n",
			       node);
			return rc;
		}
	}

	return 0;
}
machine_arch_initcall(cell, cbe_init_pm_irq);

void cbe_sync_irq(int node)
{
	unsigned int irq;

	irq = irq_find_mapping(NULL,
			       IIC_IRQ_IOEX_PMI
			       | (node << IIC_IRQ_NODE_SHIFT));

	if (!irq) {
		printk(KERN_WARNING "ERROR: unable to find existing irq for node %d\n",
		       node);
		return;
	}

	synchronize_irq(irq);
}
EXPORT_SYMBOL_GPL(cbe_sync_irq);