1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Xtensa hardware breakpoints/watchpoints handling functions
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * This file is subject to the terms and conditions of the GNU General Public
5*4882a593Smuzhiyun * License. See the file "COPYING" in the main directory of this archive
6*4882a593Smuzhiyun * for more details.
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * Copyright (C) 2016 Cadence Design Systems Inc.
9*4882a593Smuzhiyun */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/hw_breakpoint.h>
12*4882a593Smuzhiyun #include <linux/log2.h>
13*4882a593Smuzhiyun #include <linux/percpu.h>
14*4882a593Smuzhiyun #include <linux/perf_event.h>
15*4882a593Smuzhiyun #include <asm/core.h>
16*4882a593Smuzhiyun
/*
 * Breakpoint currently in use for each IBREAKA register slot on this
 * CPU; NULL means the slot is free (see alloc_slot()/free_slot()).
 */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);

/*
 * Watchpoint currently in use for each DBREAKA register slot on this
 * CPU; NULL means the slot is free.
 */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[XCHAL_NUM_DBREAK]);
22*4882a593Smuzhiyun
hw_breakpoint_slots(int type)23*4882a593Smuzhiyun int hw_breakpoint_slots(int type)
24*4882a593Smuzhiyun {
25*4882a593Smuzhiyun switch (type) {
26*4882a593Smuzhiyun case TYPE_INST:
27*4882a593Smuzhiyun return XCHAL_NUM_IBREAK;
28*4882a593Smuzhiyun case TYPE_DATA:
29*4882a593Smuzhiyun return XCHAL_NUM_DBREAK;
30*4882a593Smuzhiyun default:
31*4882a593Smuzhiyun pr_warn("unknown slot type: %d\n", type);
32*4882a593Smuzhiyun return 0;
33*4882a593Smuzhiyun }
34*4882a593Smuzhiyun }
35*4882a593Smuzhiyun
arch_check_bp_in_kernelspace(struct arch_hw_breakpoint * hw)36*4882a593Smuzhiyun int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
37*4882a593Smuzhiyun {
38*4882a593Smuzhiyun unsigned int len;
39*4882a593Smuzhiyun unsigned long va;
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun va = hw->address;
42*4882a593Smuzhiyun len = hw->len;
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
45*4882a593Smuzhiyun }
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun /*
48*4882a593Smuzhiyun * Construct an arch_hw_breakpoint from a perf_event.
49*4882a593Smuzhiyun */
hw_breakpoint_arch_parse(struct perf_event * bp,const struct perf_event_attr * attr,struct arch_hw_breakpoint * hw)50*4882a593Smuzhiyun int hw_breakpoint_arch_parse(struct perf_event *bp,
51*4882a593Smuzhiyun const struct perf_event_attr *attr,
52*4882a593Smuzhiyun struct arch_hw_breakpoint *hw)
53*4882a593Smuzhiyun {
54*4882a593Smuzhiyun /* Type */
55*4882a593Smuzhiyun switch (attr->bp_type) {
56*4882a593Smuzhiyun case HW_BREAKPOINT_X:
57*4882a593Smuzhiyun hw->type = XTENSA_BREAKPOINT_EXECUTE;
58*4882a593Smuzhiyun break;
59*4882a593Smuzhiyun case HW_BREAKPOINT_R:
60*4882a593Smuzhiyun hw->type = XTENSA_BREAKPOINT_LOAD;
61*4882a593Smuzhiyun break;
62*4882a593Smuzhiyun case HW_BREAKPOINT_W:
63*4882a593Smuzhiyun hw->type = XTENSA_BREAKPOINT_STORE;
64*4882a593Smuzhiyun break;
65*4882a593Smuzhiyun case HW_BREAKPOINT_RW:
66*4882a593Smuzhiyun hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
67*4882a593Smuzhiyun break;
68*4882a593Smuzhiyun default:
69*4882a593Smuzhiyun return -EINVAL;
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun /* Len */
73*4882a593Smuzhiyun hw->len = attr->bp_len;
74*4882a593Smuzhiyun if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
75*4882a593Smuzhiyun return -EINVAL;
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun /* Address */
78*4882a593Smuzhiyun hw->address = attr->bp_addr;
79*4882a593Smuzhiyun if (hw->address & (hw->len - 1))
80*4882a593Smuzhiyun return -EINVAL;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun return 0;
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun
/*
 * Notifier hook required by the generic hw_breakpoint core.  On Xtensa
 * breakpoint hits are dispatched directly from check_hw_breakpoint(),
 * so there is nothing to do here.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
90*4882a593Smuzhiyun
/*
 * Write value @v to special register @sr.
 *
 * Only the IBREAKA/DBREAKA/DBREAKC registers that can exist on this
 * configuration are handled; any other @sr is silently ignored (the
 * switch has no default case).
 */
static void xtensa_wsr(unsigned long v, u8 sr)
{
	/* We don't have indexed wsr and creating instruction dynamically
	 * doesn't seem worth it given how small XCHAL_NUM_IBREAK and
	 * XCHAL_NUM_DBREAK are. Thus the switch. In case build breaks here
	 * the switch below needs to be extended.
	 */
	BUILD_BUG_ON(XCHAL_NUM_IBREAK > 2);
	BUILD_BUG_ON(XCHAL_NUM_DBREAK > 2);

	switch (sr) {
#if XCHAL_NUM_IBREAK > 0
	case SREG_IBREAKA + 0:
		xtensa_set_sr(v, SREG_IBREAKA + 0);
		break;
#endif
#if XCHAL_NUM_IBREAK > 1
	case SREG_IBREAKA + 1:
		xtensa_set_sr(v, SREG_IBREAKA + 1);
		break;
#endif

#if XCHAL_NUM_DBREAK > 0
	case SREG_DBREAKA + 0:
		xtensa_set_sr(v, SREG_DBREAKA + 0);
		break;
	case SREG_DBREAKC + 0:
		xtensa_set_sr(v, SREG_DBREAKC + 0);
		break;
#endif
#if XCHAL_NUM_DBREAK > 1
	case SREG_DBREAKA + 1:
		xtensa_set_sr(v, SREG_DBREAKA + 1);
		break;

	case SREG_DBREAKC + 1:
		xtensa_set_sr(v, SREG_DBREAKC + 1);
		break;
#endif
	}
}
132*4882a593Smuzhiyun
/*
 * Claim the first free entry in the @slot array of size @n for @bp.
 * Returns the claimed index, or -EBUSY when every entry is taken.
 */
static int alloc_slot(struct perf_event **slot, size_t n,
		      struct perf_event *bp)
{
	size_t idx = 0;

	while (idx < n) {
		if (!slot[idx]) {
			slot[idx] = bp;
			return idx;
		}
		++idx;
	}
	return -EBUSY;
}
146*4882a593Smuzhiyun
/*
 * Program hardware instruction-breakpoint slot @reg from @bp: write
 * the target address into IBREAKA[reg], then set the matching bit in
 * IBREAKENABLE.  The address is written before the enable bit is set.
 */
static void set_ibreak_regs(int reg, struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long ibreakenable;

	xtensa_wsr(info->address, SREG_IBREAKA + reg);
	ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);
	xtensa_set_sr(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
}
156*4882a593Smuzhiyun
/*
 * Program hardware data-watchpoint slot @reg from @bp.
 *
 * For a power-of-two length (enforced by hw_breakpoint_arch_parse()),
 * -len yields the address-mask bits for the watched region, which are
 * then restricted to the DBREAKC mask field.  Load/store enable bits
 * are set according to the breakpoint type, and the address is written
 * before the control register arms the watchpoint.
 */
static void set_dbreak_regs(int reg, struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long dbreakc = DBREAKC_MASK_MASK & -info->len;

	if (info->type & XTENSA_BREAKPOINT_LOAD)
		dbreakc |= DBREAKC_LOAD_MASK;
	if (info->type & XTENSA_BREAKPOINT_STORE)
		dbreakc |= DBREAKC_STOR_MASK;

	xtensa_wsr(info->address, SREG_DBREAKA + reg);
	xtensa_wsr(dbreakc, SREG_DBREAKC + reg);
}
170*4882a593Smuzhiyun
arch_install_hw_breakpoint(struct perf_event * bp)171*4882a593Smuzhiyun int arch_install_hw_breakpoint(struct perf_event *bp)
172*4882a593Smuzhiyun {
173*4882a593Smuzhiyun int i;
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun if (counter_arch_bp(bp)->type == XTENSA_BREAKPOINT_EXECUTE) {
176*4882a593Smuzhiyun /* Breakpoint */
177*4882a593Smuzhiyun i = alloc_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
178*4882a593Smuzhiyun if (i < 0)
179*4882a593Smuzhiyun return i;
180*4882a593Smuzhiyun set_ibreak_regs(i, bp);
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun } else {
183*4882a593Smuzhiyun /* Watchpoint */
184*4882a593Smuzhiyun i = alloc_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
185*4882a593Smuzhiyun if (i < 0)
186*4882a593Smuzhiyun return i;
187*4882a593Smuzhiyun set_dbreak_regs(i, bp);
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun return 0;
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun
/*
 * Release the entry in the @slot array of size @n that holds @bp.
 * Returns the freed index, or -EBUSY when @bp is not found.
 */
static int free_slot(struct perf_event **slot, size_t n,
		     struct perf_event *bp)
{
	size_t idx;

	for (idx = 0; idx < n; ++idx)
		if (slot[idx] == bp) {
			slot[idx] = NULL;
			return idx;
		}

	return -EBUSY;
}
205*4882a593Smuzhiyun
arch_uninstall_hw_breakpoint(struct perf_event * bp)206*4882a593Smuzhiyun void arch_uninstall_hw_breakpoint(struct perf_event *bp)
207*4882a593Smuzhiyun {
208*4882a593Smuzhiyun struct arch_hw_breakpoint *info = counter_arch_bp(bp);
209*4882a593Smuzhiyun int i;
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun if (info->type == XTENSA_BREAKPOINT_EXECUTE) {
212*4882a593Smuzhiyun unsigned long ibreakenable;
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun /* Breakpoint */
215*4882a593Smuzhiyun i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
216*4882a593Smuzhiyun if (i >= 0) {
217*4882a593Smuzhiyun ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);
218*4882a593Smuzhiyun xtensa_set_sr(ibreakenable & ~(1 << i),
219*4882a593Smuzhiyun SREG_IBREAKENABLE);
220*4882a593Smuzhiyun }
221*4882a593Smuzhiyun } else {
222*4882a593Smuzhiyun /* Watchpoint */
223*4882a593Smuzhiyun i = free_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
224*4882a593Smuzhiyun if (i >= 0)
225*4882a593Smuzhiyun xtensa_wsr(0, SREG_DBREAKC + i);
226*4882a593Smuzhiyun }
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun
/*
 * PMU read callback required by the hw_breakpoint interface; there is
 * no hardware counter state to read back, so this is a no-op.
 */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}
232*4882a593Smuzhiyun
flush_ptrace_hw_breakpoint(struct task_struct * tsk)233*4882a593Smuzhiyun void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun int i;
236*4882a593Smuzhiyun struct thread_struct *t = &tsk->thread;
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
239*4882a593Smuzhiyun if (t->ptrace_bp[i]) {
240*4882a593Smuzhiyun unregister_hw_breakpoint(t->ptrace_bp[i]);
241*4882a593Smuzhiyun t->ptrace_bp[i] = NULL;
242*4882a593Smuzhiyun }
243*4882a593Smuzhiyun }
244*4882a593Smuzhiyun for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
245*4882a593Smuzhiyun if (t->ptrace_wp[i]) {
246*4882a593Smuzhiyun unregister_hw_breakpoint(t->ptrace_wp[i]);
247*4882a593Smuzhiyun t->ptrace_wp[i] = NULL;
248*4882a593Smuzhiyun }
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun }
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun /*
253*4882a593Smuzhiyun * Set ptrace breakpoint pointers to zero for this task.
254*4882a593Smuzhiyun * This is required in order to prevent child processes from unregistering
255*4882a593Smuzhiyun * breakpoints held by their parent.
256*4882a593Smuzhiyun */
clear_ptrace_hw_breakpoint(struct task_struct * tsk)257*4882a593Smuzhiyun void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
258*4882a593Smuzhiyun {
259*4882a593Smuzhiyun memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
260*4882a593Smuzhiyun memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
restore_dbreak(void)263*4882a593Smuzhiyun void restore_dbreak(void)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun int i;
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
268*4882a593Smuzhiyun struct perf_event *bp = this_cpu_ptr(wp_on_reg)[i];
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun if (bp)
271*4882a593Smuzhiyun set_dbreak_regs(i, bp);
272*4882a593Smuzhiyun }
273*4882a593Smuzhiyun clear_thread_flag(TIF_DB_DISABLED);
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun
/*
 * Debug-exception dispatcher for hardware break/watchpoint hits.
 *
 * Returns 0 when DEBUGCAUSE reports an IBREAK or DBREAK hit (the event
 * is delivered to perf or the watchpoint is temporarily disarmed), and
 * -ENOENT when the exception was caused by neither.
 */
int check_hw_breakpoint(struct pt_regs *regs)
{
	if (regs->debugcause & BIT(DEBUGCAUSE_IBREAK_BIT)) {
		int i;
		struct perf_event **bp = this_cpu_ptr(bp_on_reg);

		/* Report every armed breakpoint matching the faulting PC. */
		for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
			if (bp[i] && !bp[i]->attr.disabled &&
			    regs->pc == bp[i]->attr.bp_addr)
				perf_bp_event(bp[i], regs);
		}
		return 0;
	} else if (regs->debugcause & BIT(DEBUGCAUSE_DBREAK_BIT)) {
		struct perf_event **bp = this_cpu_ptr(wp_on_reg);
		/* DEBUGCAUSE encodes which DBREAK register fired. */
		int dbnum = (regs->debugcause & DEBUGCAUSE_DBNUM_MASK) >>
			DEBUGCAUSE_DBNUM_SHIFT;

		if (dbnum < XCHAL_NUM_DBREAK && bp[dbnum]) {
			if (user_mode(regs)) {
				perf_bp_event(bp[dbnum], regs);
			} else {
				/*
				 * Kernel-mode hit: disarm this DBREAK so the
				 * access can proceed, flag the pending re-arm
				 * (restore_dbreak() will reprogram it).
				 */
				set_thread_flag(TIF_DB_DISABLED);
				xtensa_wsr(0, SREG_DBREAKC + dbnum);
			}
		} else {
			WARN_ONCE(1,
				  "Wrong/unconfigured DBNUM reported in DEBUGCAUSE: %d\n",
				  dbnum);
		}
		return 0;
	}
	return -ENOENT;
}
309