/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 05, 06 by Ralf Baechle
 * Copyright (C) 2005 by MIPS Technologies, Inc.
 */
#include <linux/cpumask.h>
#include <linux/oprofile.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/irq_regs.h>
#include <asm/time.h>

#include "op_impl.h"

#define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
					 MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)		((vpe)	  << MIPS_PERFCTRL_VPEID_S)

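/*
 * Counters are preloaded to 0x80000000 - count and count upwards, so bit 31
 * (M_COUNTER_OVERFLOW) becomes set once the requested number of events has
 * occurred; the interrupt handler below tests this bit.
 */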
#define M_COUNTER_OVERFLOW		(1UL	  << 31)

static int (*save_perf_irq)(void);
static int perfcount_irq;

/*
 * XLR has only one set of counters per core. Designate the
 * first hardware thread in the core for setup and init.
 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
 */
#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c)	((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c)	0
#endif

#ifdef CONFIG_MIPS_MT_SMP
#define WHAT		(MIPS_PERFCTRL_MT_EN_VPE | \
			 M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data)))
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_vpe_id(&current_cpu_data))

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyway, so we hardcode a few things here for the moment.  The way it's
 * done here ensures that an oprofile VSMP kernel will also run correctly on
 * a lesser core such as a 24K, or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

#else

#define WHAT		0
#define vpe_id()	0

static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif

static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	return counters << vpe_shift();
}

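/*
 * When more than one VPE is in use, the core's counters are shared out
 * between them (vpe_shift() == 1), e.g. counters_total_to_per_cpu(4) == 2.
 * The __define_perf_accessors() macro below generates read/write helpers
 * that pick the physical counter/control register belonging to the VPE we
 * are currently running on.
 */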
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}									\

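/*
 * For example, __define_perf_accessors(perfcntr, 0, 2) expands to
 * r_c0_perfcntr0()/w_c0_perfcntr0(), which read/write perfcntr0 when
 * vpe_id() is 0 and perfcntr2 when it is 1.
 */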
__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)

struct op_mips_model op_model_mipsxx_ops;

static struct mipsxx_register_config {
	unsigned int control[4];
	unsigned int counter[4];
} reg;

/* Compute all of the registers in preparation for enabling profiling.	*/

static void mipsxx_reg_setup(struct op_counter_config *ctr)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;
	int i;

	/* Compute the performance counter control word.  */
	for (i = 0; i < counters; i++) {
		reg.control[i] = 0;
		reg.counter[i] = 0;

		if (!ctr[i].enabled)
			continue;

		reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
				 MIPS_PERFCTRL_IE;
		if (ctr[i].kernel)
			reg.control[i] |= MIPS_PERFCTRL_K;
		if (ctr[i].user)
			reg.control[i] |= MIPS_PERFCTRL_U;
		if (ctr[i].exl)
			reg.control[i] |= MIPS_PERFCTRL_EXL;
		if (boot_cpu_type() == CPU_XLR)
			reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
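		/*
		 * Preload the counter so that it reaches bit 31 (the
		 * overflow bit) after ctr[i].count events and raises the
		 * counter interrupt.
		 */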
		reg.counter[i] = 0x80000000 - ctr[i].count;
	}
}

/* Program all of the registers in preparation for enabling profiling.	*/

static void mipsxx_cpu_setup(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(reg.counter[3]);
		fallthrough;
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(reg.counter[2]);
		fallthrough;
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(reg.counter[1]);
		fallthrough;
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(reg.counter[0]);
	}
}

/* Start all counters on current CPU */
static void mipsxx_cpu_start(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	if (oprofile_skip_cpu(smp_processor_id()))
		return;

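	/*
	 * On CONFIG_MIPS_MT_SMP kernels WHAT adds MIPS_PERFCTRL_MT_EN_VPE
	 * plus this VPE's id to the control word so the counter counts
	 * events for the local VPE only; otherwise WHAT is 0.
	 */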
	switch (counters) {
	case 4:
		w_c0_perfctrl3(WHAT | reg.control[3]);
		fallthrough;
	case 3:
		w_c0_perfctrl2(WHAT | reg.control[2]);
		fallthrough;
	case 2:
		w_c0_perfctrl1(WHAT | reg.control[1]);
		fallthrough;
	case 1:
		w_c0_perfctrl0(WHAT | reg.control[0]);
	}
}

/* Stop all counters on current CPU */
static void mipsxx_cpu_stop(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		fallthrough;
	case 3:
		w_c0_perfctrl2(0);
		fallthrough;
	case 2:
		w_c0_perfctrl1(0);
		fallthrough;
	case 1:
		w_c0_perfctrl0(0);
	}
}

static int mipsxx_perfcount_handler(void)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;
	unsigned int control;
	unsigned int counter;
	int handled = IRQ_NONE;

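	/*
	 * On MIPS32/64 R2 and later cores the Cause.PCI bit flags a pending
	 * performance counter interrupt, so we can return early if it is
	 * clear.
	 */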
	if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
		return handled;

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		control = r_c0_perfctrl ## n();				\
		counter = r_c0_perfcntr ## n();				\
		if ((control & MIPS_PERFCTRL_IE) &&			\
		    (counter & M_COUNTER_OVERFLOW)) {			\
			oprofile_add_sample(get_irq_regs(), n);		\
			w_c0_perfcntr ## n(reg.counter[n]);		\
			handled = IRQ_HANDLED;				\
		}
	HANDLE_COUNTER(3)
	fallthrough;
	HANDLE_COUNTER(2)
	fallthrough;
	HANDLE_COUNTER(1)
	fallthrough;
	HANDLE_COUNTER(0)
	}

	return handled;
}

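/*
 * Each control register's M bit signals that another counter/control pair
 * follows, so walking the chain tells us how many counters this core
 * implements.
 */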
static inline int __n_counters(void)
{
	if (!cpu_has_perf)
		return 0;
	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
		return 1;
	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
		return 2;
	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
		return 3;

	return 4;
}

static inline int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
		fallthrough;
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
		fallthrough;
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
		fallthrough;
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}

static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
{
	return mipsxx_perfcount_handler();
}

static int __init mipsxx_init(void)
{
	int counters;

	counters = n_counters();
	if (counters == 0) {
		printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
		return -ENODEV;
	}

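	/*
	 * Unless each TC has its own counters, the core's counters are
	 * shared between the VPEs, so only report this VPE's share.
	 */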
#ifdef CONFIG_MIPS_MT_SMP
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif
	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	op_model_mipsxx_ops.num_counters = counters;
	switch (current_cpu_type()) {
	case CPU_M14KC:
		op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
		break;

	case CPU_M14KEC:
		op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
		break;

	case CPU_20KC:
		op_model_mipsxx_ops.cpu_type = "mips/20K";
		break;

	case CPU_24K:
		op_model_mipsxx_ops.cpu_type = "mips/24K";
		break;

	case CPU_25KF:
		op_model_mipsxx_ops.cpu_type = "mips/25K";
		break;

	case CPU_1004K:
	case CPU_34K:
		op_model_mipsxx_ops.cpu_type = "mips/34K";
		break;

	case CPU_1074K:
	case CPU_74K:
		op_model_mipsxx_ops.cpu_type = "mips/74K";
		break;

	case CPU_INTERAPTIV:
		op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
		break;

	case CPU_PROAPTIV:
		op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
		break;

	case CPU_P5600:
		op_model_mipsxx_ops.cpu_type = "mips/P5600";
		break;

	case CPU_I6400:
		op_model_mipsxx_ops.cpu_type = "mips/I6400";
		break;

	case CPU_M5150:
		op_model_mipsxx_ops.cpu_type = "mips/M5150";
		break;

	case CPU_5KC:
		op_model_mipsxx_ops.cpu_type = "mips/5K";
		break;

	case CPU_R10000:
		if ((current_cpu_data.processor_id & 0xff) == 0x20)
			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
		else
			op_model_mipsxx_ops.cpu_type = "mips/r10000";
		break;

	case CPU_R12000:
	case CPU_R14000:
		op_model_mipsxx_ops.cpu_type = "mips/r12000";
		break;

	case CPU_R16000:
		op_model_mipsxx_ops.cpu_type = "mips/r16000";
		break;

	case CPU_SB1:
	case CPU_SB1A:
		op_model_mipsxx_ops.cpu_type = "mips/sb1";
		break;

	case CPU_LOONGSON32:
		op_model_mipsxx_ops.cpu_type = "mips/loongson1";
		break;

	case CPU_XLR:
		op_model_mipsxx_ops.cpu_type = "mips/xlr";
		break;

	default:
		printk(KERN_ERR "Profiling unsupported for this CPU\n");

		return -ENODEV;
	}

	save_perf_irq = perf_irq;
	perf_irq = mipsxx_perfcount_handler;

	if (get_c0_perfcount_int)
		perfcount_irq = get_c0_perfcount_int();
	else if (cp0_perfcount_irq >= 0)
		perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		perfcount_irq = -1;

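	/*
	 * The IRQ may be shared, so pass save_perf_irq as a unique dev_id
	 * cookie; mipsxx_exit() hands the same pointer back to free_irq().
	 */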
	if (perfcount_irq >= 0)
		return request_irq(perfcount_irq, mipsxx_perfcount_int,
				   IRQF_PERCPU | IRQF_NOBALANCING |
				   IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				   IRQF_SHARED,
				   "Perfcounter", save_perf_irq);

	return 0;
}

static void mipsxx_exit(void)
{
	int counters = op_model_mipsxx_ops.num_counters;

	if (perfcount_irq >= 0)
		free_irq(perfcount_irq, save_perf_irq);

	counters = counters_per_cpu_to_total(counters);
	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	perf_irq = save_perf_irq;
}

struct op_mips_model op_model_mipsxx_ops = {
	.reg_setup	= mipsxx_reg_setup,
	.cpu_setup	= mipsxx_cpu_setup,
	.init		= mipsxx_init,
	.exit		= mipsxx_exit,
	.cpu_start	= mipsxx_cpu_start,
	.cpu_stop	= mipsxx_cpu_stop,
};