xref: /OK3568_Linux_fs/kernel/arch/x86/mm/mmio-mod.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#define DEBUG 1

#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>
#include <linux/pgtable.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"

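/*
 * Per-CPU record of the most recent fault seen by the pre() handler.
 * post() uses it to finish decoding the access, and active_traces
 * catches unmatched pre/post pairs.
 */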
struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

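/*
 * One entry per traced ioremap mapping, kept on trace_list. It ties the
 * kmmio probe covering the mapping to its physical base and map id.
 */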
struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	resource_size_t phys;
	unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);		/* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */
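
/*
 * A minimal sketch of the pattern these rules imply (illustrative only;
 * ioremap_trace_core() and iounmap_trace_core() below follow it):
 *
 *	spin_lock_irq(&trace_lock);
 *	if (is_enabled())
 *		mmio_trace_mapping(&map);	// safe while trace_lock is held
 *	spin_unlock_irq(&trace_lock);
 */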

/* module parameters */
static unsigned long	filter_offset;
static bool		nommiotrace;
static bool		trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");

static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}

static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("Error in %s: no pte for page 0x%08lx\n",
		       __func__, address);
		return;
	}

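	/*
	 * PG_LEVEL_2M covers large pages generally; the message below
	 * still says 4MB, the large-page size on 32-bit non-PAE.
	 * Tracing accesses through large pages is not supported.
	 */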
	if (level == PG_LEVEL_2M) {
		pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
			 address);
		BUG();
	}
	pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
		address,
		(unsigned long long)pte_val(*pte),
		(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);

	pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
		 addr, my_reason->addr);
	print_pte(addr);
	pr_emerg("faulting IP is at %pS\n", (void *)regs->ip);
	pr_emerg("last faulting IP was at %pS\n", (void *)my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		 regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx   rcx: %016lx   rdx: %016lx\n",
		 regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}

static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->private;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

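	/*
	 * Reconstruct the physical address from the fault's offset into
	 * the traced mapping, and tag the event with the mapping's id.
	 */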
	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
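			/*
			 * Unrecognized instruction: pack its first three
			 * opcode bytes into 'value' so the trace can still
			 * be inspected offline.
			 */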
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
								*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

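/*
 * post() runs after the faulting instruction has been single-stepped with
 * the page temporarily present. For a read, the destination register now
 * holds the value that came from the device, so it can be recorded here.
 */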
static void post(struct kmmio_probe *p, unsigned long condition,
							struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg("unexpected post handler\n");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

static void ioremap_trace_core(resource_size_t offset, unsigned long size,
							void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	/* These are page-unaligned. */
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err("kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.private = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled()) {
		kfree(trace);
		goto not_enabled;
	}

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}

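/*
 * Called from the arch ioremap path when mmiotrace is built in, so every
 * new MMIO mapping can be logged and, unless filtered out, armed with a
 * kmmio probe.
 */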
void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
						void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
		 (unsigned long long)offset, size, addr);
	if ((filter_offset) && (offset != filter_offset))
		return;
	ioremap_trace_core(offset, size, addr);
}

static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug("Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}

void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}

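/*
 * Let other kernel code inject a text marker into the trace stream, e.g.
 * mmiotrace_printk("about to write init sequence\n") from a driver under
 * test (hypothetical example). Only logs while tracing is enabled.
 */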
int mmiotrace_printk(const char *fmt, ...)
{
	int ret = 0;
	va_list args;
	unsigned long flags;

	va_start(args, fmt);

	spin_lock_irqsave(&trace_lock, flags);
	if (is_enabled())
		ret = mmio_trace_printk(fmt, args);
	spin_unlock_irqrestore(&trace_lock, flags);

	va_end(args);
	return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);

static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
			  trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;

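/*
 * Tracing is not reliable on SMP: while one CPU single-steps through a
 * disarmed page, another CPU could access it without faulting and the
 * event would be lost. Take every CPU except the boot CPU offline for
 * the duration of the trace.
 */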
static void enter_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) &&
	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
		pr_notice("Failed to allocate mask\n");
		goto out;
	}

	get_online_cpus();
	cpumask_copy(downed_cpus, cpu_online_mask);
	cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
	if (num_online_cpus() > 1)
		pr_notice("Disabling non-boot CPUs...\n");
	put_online_cpus();

	for_each_cpu(cpu, downed_cpus) {
		err = remove_cpu(cpu);
		if (!err)
			pr_info("CPU%d is down.\n", cpu);
		else
			pr_err("Error taking CPU%d down: %d\n", cpu, err);
	}
out:
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs still online, may miss events.\n");
}

static void leave_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
		return;
	pr_notice("Re-enabling CPUs...\n");
	for_each_cpu(cpu, downed_cpus) {
		err = add_cpu(cpu);
		if (!err)
			pr_info("enabled CPU%d.\n", cpu);
		else
			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
	}
}

#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs are online, may miss events. "
			"Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif

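/*
 * Enable tracing: initialise kmmio, go (mostly) uniprocessor, and only
 * then flip mmiotrace_enabled under trace_lock, so the pre/post handlers
 * and the ioremap/iounmap hooks start logging atomically.
 */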
void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	if (nommiotrace)
		pr_info("MMIO tracing disabled.\n");
	kmmio_init();
	enter_uniprocessor();
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info("enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}

void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	leave_uniprocessor();
	kmmio_cleanup();
	pr_info("disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}
465*4882a593Smuzhiyun }
466