/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */
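
/*
 * Orientation note (an illustrative sketch only, derived from the add_*()
 * helpers below rather than from any format specification): records are
 * written into the event buffer as sequences of unsigned longs, with
 * special records introduced by ESCAPE_CODE, e.g.
 *
 *	ESCAPE_CODE, CPU_SWITCH_CODE, cpu
 *	ESCAPE_CODE, CTX_SWITCH_CODE, pid, cookie
 *	ESCAPE_CODE, COOKIE_SWITCH_CODE, cookie
 *	offset, event		(a plain sample, no escape)
 *
 * The userspace daemon interprets this stream; see the individual add_*()
 * helpers for the exact records emitted.
 */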

#include <linux/file.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/gfp.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;
	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);
	return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	mmap_read_lock(mm);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		mmap_read_unlock(mm);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	mmap_read_unlock(mm);
	return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return NOTIFY_OK;
}


static struct notifier_block task_free_nb = {
	.notifier_call	= task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call	= task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call	= munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};

static void free_all_tasks(void)
{
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}

int sync_start(void)
{
	int err;

	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
		return -ENOMEM;

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

	start_cpu_work();

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
	free_all_tasks();
out1:
	free_cpumask_var(marked_cpus);
	goto out;
}


void sync_stop(void)
{
	end_cpu_work();
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	barrier();			/* do all of the above first */

	flush_cpu_work();

	free_all_tasks();
	free_cpumask_var(marked_cpus);
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(const struct path *path)
{
	unsigned long cookie;

	if (path->dentry->d_flags & DCACHE_COOKIE)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}


/* Look up the dcookie for the task's mm->exe_file,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct file *exe_file;

	if (!mm)
		goto done;

	exe_file = get_mm_exe_file(mm);
	if (!exe_file)
		goto done;

	cookie = fast_get_dcookie(&exe_file->f_path);
	fput(exe_file);
done:
	return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 *
 * The caller must ensure the mm is not nil (ie: not a kernel thread).
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;
	mmap_read_unlock(mm);

	return cookie;
}
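
/* Worked example for the offset computation above (hypothetical values,
 * not used anywhere in the code): with 4 KiB pages, a file-backed VMA
 * with vm_pgoff == 3 and vm_start == 0x401000, a sample at
 * addr == 0x401234 gives
 *	offset = (3 << 12) + 0x401234 - 0x401000 = 0x3234,
 * i.e. the offset of the sampled instruction within the mapped file.
 */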

static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}


static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
	unsigned long code, pc, val;
	unsigned long cookie;
	off_t offset;

	if (!op_cpu_buffer_get_data(entry, &code))
		return;
	if (!op_cpu_buffer_get_data(entry, &pc))
		return;
	if (!op_cpu_buffer_get_size(entry))
		return;

	if (mm) {
		cookie = lookup_dcookie(mm, pc, &offset);

		if (cookie == NO_COOKIE)
			offset = pc;
		if (cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = pc;
		}
		if (cookie != last_cookie) {
			add_cookie_switch(cookie);
			last_cookie = cookie;
		}
	} else
		offset = pc;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* Offset from Dcookie */

	while (op_cpu_buffer_get_data(entry, &val))
		add_event_entry(val);
}

static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}


/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	unsigned long cookie;
	off_t offset;

	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	}

	/* add userspace sample */

	if (!mm) {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
		return 0;
	}

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}


static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	mmput(mm);
}

static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}
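
/* Lifecycle sketch (illustrative only): a task T handed off while
 * profiling is active progresses as
 *
 *	task_free_notify(T)            ->  T placed on dying_tasks
 *	1st process_task_mortuary()    ->  T spliced onto dead_tasks
 *	2nd process_task_mortuary()    ->  T freed via free_task()
 *
 * so T is released only after two complete passes, by which time no CPU
 * buffer can still be referring to it.
 */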


static void mark_done(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpumask_test_cpu(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpumask_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we would need a fifth state and some special
 * handling in sync_buffer()
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;
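
/* Because of the explicit -2 start, the enumerators evaluate to
 *	sb_bt_ignore = -2, sb_buffer_start = -1,
 *	sb_bt_start = 0, sb_sample_start = 1,
 * so the "state < sb_bt_start" test in sync_buffer() below skips samples
 * seen before any switch/trace-begin record has established a context, as
 * well as the remainder of a backtrace whose first sample could not be
 * mapped.
 */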

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_lock and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct mm_struct *mm = NULL;
	struct mm_struct *oldmm;
	unsigned long val;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	sync_buffer_state state = sb_buffer_start;
	unsigned int i;
	unsigned long available;
	unsigned long flags;
	struct op_entry entry;
	struct op_sample *sample;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	op_cpu_buffer_reset(cpu);
	available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; ++i) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;

		if (is_code(sample->eip)) {
			flags = sample->event;
			if (flags & TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			}
			if (flags & KERNEL_CTX_SWITCH) {
				/* kernel/userspace switch */
				in_kernel = flags & IS_KERNEL;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(flags & IS_KERNEL);
			}
			if (flags & USER_CTX_SWITCH
			    && op_cpu_buffer_get_data(&entry, &val)) {
				/* userspace context switch */
				new = (struct task_struct *)val;
				oldmm = mm;
				release_mm(oldmm);
				mm = get_task_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
			if (op_cpu_buffer_get_size(&entry))
				add_data(&entry, mm);
			continue;
		}

		if (state < sb_bt_start)
			/* ignore sample */
			continue;

		if (add_sample(mm, sample, in_kernel))
			continue;

		/* ignore backtraces if failed to add a sample */
		if (state == sb_bt_start) {
			state = sb_bt_ignore;
			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
		}
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}

/* This function can be used to add a buffer worth of data directly to
 * the kernel buffer. The buffer is assumed to be a circular buffer.
 * Take entries beginning at index start, up to but not including index
 * stop, wrapping at max entries.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
		       unsigned int stop, unsigned int max)
{
	int i;

	i = start;

	mutex_lock(&buffer_mutex);
	while (i != stop) {
		add_event_entry(buf[i++]);

		if (i >= max)
			i = 0;
	}

	mutex_unlock(&buffer_mutex);
}
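
/* Worked example (hypothetical values): with max == 8, start == 6 and
 * stop == 2, the loop above emits buf[6], buf[7], buf[0], buf[1] and then
 * stops, i.e. it copies the wrapped half-open range [start, stop).
 */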