/**
 * @file arch/alpha/oprofile/op_model_ev4.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>

#include "op_impl.h"


/* Compute all of the registers in preparation for enabling profiling. */

static void
ev4_reg_setup(struct op_register_config *reg,
	      struct op_counter_config *ctr,
	      struct op_system_config *sys)
{
	unsigned long ctl = 0, count, hilo;

	/* Select desired events. We've mapped the event numbers
	   such that they fit directly into the event selection fields.

	   Note that there is no "off" setting. In both cases we select
	   the EXTERNAL event source, hoping that it'll be the lowest
	   frequency, and set the frequency counter to LOW. The interrupts
	   for these "disabled" counter overflows are ignored by the
	   interrupt handler.

	   This is most irritating, because the hardware *can* enable and
	   disable the interrupts for these counters independently, but the
	   wrperfmon interface doesn't allow it. */

	ctl |= (ctr[0].enabled ? ctr[0].event << 8 : 14 << 8);
	ctl |= (ctr[1].enabled ? (ctr[1].event - 16) << 32 : 7ul << 32);
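	/* As encoded above, counter 0's event select sits in the bits
	   starting at bit 8 of the mux word and counter 1's in the bits
	   starting at bit 32; counter 1 event numbers are biased by 16
	   in the event table.  The fallback values 14 and 7 are the
	   EXTERNAL source mentioned in the comment above. */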

	/* EV4 cannot read or write its counter registers. The only
	   thing one can do at all is see whether a counter overflows and
	   take the interrupt. We can set the width of the counters, to
	   some extent. Take the interrupt count selected by the user,
	   map it onto one of the possible values, and write it back. */

	count = ctr[0].count;
	if (count <= 4096)
		count = 4096, hilo = 1;
	else
		count = 65536, hilo = 0;
	ctr[0].count = count;
	ctl |= (ctr[0].enabled && hilo) << 3;

	count = ctr[1].count;
	if (count <= 256)
		count = 256, hilo = 1;
	else
		count = 4096, hilo = 0;
	ctr[1].count = count;
	ctl |= (ctr[1].enabled && hilo);
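
	/* For example, a requested count of 1000 on counter 0 is rounded
	   up to the 4096-event width with its "hi" bit (bit 3) set, while
	   anything larger uses the 65536-event width; counter 1 likewise
	   chooses between 256 and 4096 events via bit 0. */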

	reg->mux_select = ctl;

	/* Select performance monitoring options. */
	/* ??? Need to come up with some mechanism to trace only
	   selected processes. EV4 does not have a mechanism to
	   select kernel or user mode only. For now, enable always. */
	reg->proc_mode = 0;

	/* Frequency is folded into mux_select for EV4. */
	reg->freq = 0;

	/* See above regarding no writes. */
	reg->reset_values = 0;
	reg->need_reset = 0;

}

/* Program all of the registers in preparation for enabling profiling. */

static void
ev4_cpu_setup(void *x)
{
	struct op_register_config *reg = x;

	wrperfmon(2, reg->mux_select);
	wrperfmon(3, reg->proc_mode);
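
	/* wrperfmon is the PALcode performance-monitor call; function 2
	   loads the event multiplexer selection and function 3 the
	   process-mode options computed in ev4_reg_setup above. */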
}

static void
ev4_handle_interrupt(unsigned long which, struct pt_regs *regs,
		     struct op_counter_config *ctr)
{
	/* EV4 can't properly disable counters individually.
	   Discard "disabled" events now. */
	if (!ctr[which].enabled)
		return;

	/* Record the sample. */
	oprofile_add_sample(regs, which);
}


struct op_axp_model op_model_ev4 = {
	.reg_setup = ev4_reg_setup,
	.cpu_setup = ev4_cpu_setup,
	.reset_ctr = NULL,
	.handle_interrupt = ev4_handle_interrupt,
	.cpu_type = "alpha/ev4",
	.num_counters = 2,
	.can_set_proc_mode = 0,
};
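
/* This descriptor is expected to be selected by the generic Alpha
   oprofile setup code (see arch/alpha/oprofile/common.c) when it
   detects an EV4-family CPU.  .reset_ctr is NULL and .can_set_proc_mode
   is 0 because, as noted above, EV4 counters can be neither written
   nor restricted to kernel or user mode. */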