/**
 * @file event_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * This is the global event buffer that the user-space
 * daemon reads from. The event buffer is an untyped array
 * of unsigned longs. Entries are prefixed by the
 * escape value ESCAPE_CODE followed by an identifying code.
 */
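
/*
 * For illustration, a sketch of one buffer record (the codes live in
 * event_buffer.h, the callers in buffer_sync.c): a CPU-switch record
 * takes three slots,
 *
 *	add_event_entry(ESCAPE_CODE);		// ~0UL, flags a record
 *	add_event_entry(CPU_SWITCH_CODE);	// the identifying code
 *	add_event_entry(cpu);			// payload: new CPU number
 *
 * while ordinary samples are written as bare (offset, event) pairs
 * with no escape prefix.
 */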

#include <linux/vmalloc.h>
#include <linux/oprofile.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/dcookies.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

#include "oprof.h"
#include "event_buffer.h"
#include "oprofile_stats.h"

DEFINE_MUTEX(buffer_mutex);

static unsigned long buffer_opened;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
static unsigned long *event_buffer;
static unsigned long buffer_size;
static unsigned long buffer_watershed;
static size_t buffer_pos;
/* atomic_t because wait_event checks it outside of buffer_mutex */
static atomic_t buffer_ready = ATOMIC_INIT(0);

/*
 * Add an entry to the event buffer. When we get near to the end we
 * wake up the process sleeping on the read() of the file. To protect
 * the event_buffer this function may only be called with buffer_mutex
 * held.
 */
void add_event_entry(unsigned long value)
{
	/*
	 * This shouldn't happen since all workqueues or handlers are
	 * canceled or flushed before the event buffer is freed.
	 */
	if (!event_buffer) {
		WARN_ON_ONCE(1);
		return;
	}

	if (buffer_pos == buffer_size) {
		atomic_inc(&oprofile_stats.event_lost_overflow);
		return;
	}

	event_buffer[buffer_pos] = value;
	if (++buffer_pos == buffer_size - buffer_watershed) {
		atomic_set(&buffer_ready, 1);
		wake_up(&buffer_wait);
	}
}
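
/*
 * Sketch of the caller side, simplified from sync_buffer() in
 * buffer_sync.c: the per-CPU buffers are drained into this buffer
 * with buffer_mutex held around the whole batch of entries.
 *
 *	mutex_lock(&buffer_mutex);
 *	add_event_entry(ESCAPE_CODE);
 *	add_event_entry(CPU_SWITCH_CODE);
 *	add_event_entry(cpu);
 *	...
 *	mutex_unlock(&buffer_mutex);
 */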


/* Wake up the waiting process if any. This happens
 * on "echo 0 >/dev/oprofile/enable" so the daemon
 * processes the data remaining in the event buffer.
 */
void wake_up_buffer_waiter(void)
{
	mutex_lock(&buffer_mutex);
	atomic_set(&buffer_ready, 1);
	wake_up(&buffer_wait);
	mutex_unlock(&buffer_mutex);
}

/*
 * Allocate the event buffer, snapshotting the user-configurable size
 * and watershed under oprofilefs_lock.
 */
int alloc_event_buffer(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&oprofilefs_lock, flags);
	buffer_size = oprofile_buffer_size;
	buffer_watershed = oprofile_buffer_watershed;
	raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);

	if (buffer_watershed >= buffer_size)
		return -EINVAL;

	buffer_pos = 0;
	event_buffer = vmalloc(array_size(buffer_size, sizeof(unsigned long)));
	if (!event_buffer)
		return -ENOMEM;

	return 0;
}
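
/*
 * Both knobs above are writable through oprofilefs while profiling is
 * off; an illustrative session (defaults per oprof.c are 131072 and
 * 32768 entries):
 *
 *	echo 262144 > /dev/oprofile/buffer_size
 *	echo 65536  > /dev/oprofile/buffer_watershed
 */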


/*
 * Free the event buffer. buffer_mutex and the NULL check in
 * event_buffer_read() keep a concurrent reader safe.
 */
void free_event_buffer(void)
{
	mutex_lock(&buffer_mutex);
	vfree(event_buffer);
	buffer_pos = 0;
	event_buffer = NULL;
	mutex_unlock(&buffer_mutex);
}


static int event_buffer_open(struct inode *inode, struct file *file)
{
	int err = -EPERM;

	if (!perfmon_capable())
		return -EPERM;

	if (test_and_set_bit_lock(0, &buffer_opened))
		return -EBUSY;

	/* Register as a user of dcookies
	 * to ensure they persist for the lifetime of
	 * the open event file.
	 */
	err = -EINVAL;
	file->private_data = dcookie_register();
	if (!file->private_data)
		goto out;

	if ((err = oprofile_setup()))
		goto fail;

	/* NB: the actual start happens from userspace
	 * echo 1 >/dev/oprofile/enable
	 */

	return nonseekable_open(inode, file);

fail:
	dcookie_unregister(file->private_data);
out:
	__clear_bit_unlock(0, &buffer_opened);
	return err;
}


static int event_buffer_release(struct inode *inode, struct file *file)
{
	oprofile_stop();
	oprofile_shutdown();
	dcookie_unregister(file->private_data);
	buffer_pos = 0;
	atomic_set(&buffer_ready, 0);
	__clear_bit_unlock(0, &buffer_opened);
	return 0;
}


static ssize_t event_buffer_read(struct file *file, char __user *buf,
				 size_t count, loff_t *offset)
{
	int retval = -EINVAL;
	size_t const max = buffer_size * sizeof(unsigned long);

	/* handling partial reads is more trouble than it's worth */
	if (count != max || *offset)
		return -EINVAL;

	wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));

	if (signal_pending(current))
		return -EINTR;

	/* can't currently happen */
	if (!atomic_read(&buffer_ready))
		return -EAGAIN;

	mutex_lock(&buffer_mutex);

	/* May happen if the buffer is freed during pending reads. */
	if (!event_buffer) {
		retval = -EINTR;
		goto out;
	}

	atomic_set(&buffer_ready, 0);

	retval = -EFAULT;

	count = buffer_pos * sizeof(unsigned long);

	if (copy_to_user(buf, event_buffer, count))
		goto out;

	retval = count;
	buffer_pos = 0;

out:
	mutex_unlock(&buffer_mutex);
	return retval;
}
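
/*
 * A minimal sketch of the daemon side of this contract (hypothetical
 * user-space code, not part of the kernel): request exactly the full
 * buffer, where nr_entries must match the kernel's buffer_size; the
 * returned byte count is a multiple of sizeof(unsigned long).
 *
 *	size_t max = nr_entries * sizeof(unsigned long);
 *	unsigned long *buf = malloc(max);
 *	int fd = open("/dev/oprofile/buffer", O_RDONLY);
 *	ssize_t n = read(fd, buf, max);	// blocks until the watershed wakeup
 *	for (size_t i = 0; i < n / sizeof(unsigned long); i++)
 *		process_entry(buf[i]);	// process_entry() is hypothetical
 */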

const struct file_operations event_buffer_fops = {
	.open		= event_buffer_open,
	.release	= event_buffer_release,
	.read		= event_buffer_read,
	.llseek		= no_llseek,
};
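
/*
 * event_buffer_fops is registered by the oprofilefs setup code in
 * oprofile_files.c as the "buffer" file, i.e. /dev/oprofile/buffer.
 */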