/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
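
/*
 * Overview of the data flow in this file (a summary, inferred from the
 * code below): architecture code calls oprofile_add_sample() or one of
 * the other oprofile_add_*() entry points from interrupt context to
 * append op_sample records to the current CPU's slice of op_ring_buffer;
 * wq_sync_buffer() then runs periodically on each CPU and calls
 * sync_buffer() to drain those records into the global event buffer.
 */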

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include <asm/ptrace.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

static struct trace_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer)
		ring_buffer_free(op_ring_buffer);
	op_ring_buffer = NULL;
}

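/*
 * Approximate per-event overhead of the ring_buffer event header. Used
 * only for sizing: alloc_cpu_buffers() multiplies the requested sample
 * count by (sizeof(struct op_sample) + RB_EVENT_HDR_SIZE) so the byte
 * budget leaves room for roughly oprofile_cpu_buffer_size samples.
 */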
#define RB_EVENT_HDR_SIZE  4

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	work_enabled = 0;
}

void flush_cpu_work(void)
{
	int i;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/* these works are per-cpu, no need for flush_sync */
		flush_delayed_work(&b->work);
	}
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. The passed-in struct op_entry may be uninitialized. The
 * function reserves a data array of the given size. Call
 * op_cpu_buffer_write_commit() after preparing the sample. On error
 * a NULL pointer is returned, otherwise a pointer to the reserved
 * sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]));
	if (!entry->event)
		return NULL;
	entry->sample = ring_buffer_event_data(entry->event);
	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}
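
/*
 * Illustrative sketch of the reserve/commit protocol above (not part of
 * the driver; the two-word payload is made up for the example, see
 * op_add_code() below for a real user):
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	sample = op_cpu_buffer_write_reserve(&entry, 2);
 *	if (!sample)
 *		return -ENOMEM;
 *	sample->eip = pc;
 *	sample->event = event;
 *	op_cpu_buffer_add_data(&entry, first_word);
 *	op_cpu_buffer_add_data(&entry, second_word);
 *	op_cpu_buffer_write_commit(&entry);
 */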

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}

struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;

	e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
	if (!e)
		return NULL;

	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}
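
/*
 * Sketch of the consumer side. The real consumer is sync_buffer() in
 * buffer_sync.c; this loop is illustrative only:
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	while ((sample = op_cpu_buffer_read_entry(&entry, cpu))) {
 *		if (sample->eip == ESCAPE_CODE)
 *			handle_context_switch_record(sample);
 *		else
 *			handle_pc_event_record(sample);
 *	}
 */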

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}

static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}
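
/*
 * The escape record emitted above therefore looks like this (layout
 * inferred from the code, shown for clarity):
 *
 *	eip   = ESCAPE_CODE
 *	event = any subset of TRACE_BEGIN | KERNEL_CTX_SWITCH |
 *		IS_KERNEL | USER_CTX_SWITCH
 *	data0 = task pointer, present only when USER_CTX_SWITCH is set
 */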

static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event,
	   struct task_struct *task)
{
	struct task_struct *tsk = task ? task : current;

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel,
			  struct task_struct *task)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
				unsigned long event, int is_kernel,
				struct task_struct *task)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel;
	unsigned long pc;

	if (likely(regs)) {
		is_kernel = !user_mode(regs);
		pc = profile_pc(regs);
	} else {
		is_kernel = 0;    /* This value will not be used */
		pc = ESCAPE_CODE; /* as this causes an early return. */
	}

	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}
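
/*
 * Typical call site, from an architecture's counter-overflow interrupt
 * handler (sketch only; the handler name is hypothetical, and "event"
 * stands for the index of the counter that overflowed):
 *
 *	static int my_perfctr_overflow_handler(struct pt_regs *regs)
 *	{
 *		oprofile_add_sample(regs, event);
 *		return 1;
 *	}
 */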

/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample. See the
 * illustrative sketch after oprofile_write_commit() below.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}

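/*
 * A 64-bit value occupies two data slots: the low 32 bits are stored
 * first, then the high 32 bits. Consumers are expected to reassemble
 * the two words in the same order.
 */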
int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;
	if (op_cpu_buffer_get_size(entry) < 2)
		/*
		 * the function returns 0 to indicate a buffer that is
		 * too small, even if there is some space left
		 */
		return 0;
	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;
	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}

int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}
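
/*
 * Putting the extended-sample API together (sketch only; "code", "pc"
 * and the payload values are placeholders):
 *
 *	struct op_entry entry;
 *
 *	oprofile_write_reserve(&entry, regs, pc, code, 3);
 *	oprofile_add_data(&entry, some_value);
 *	oprofile_add_data64(&entry, some_64bit_value);
 *	oprofile_write_commit(&entry);
 *
 * The size passed to oprofile_write_reserve() counts data slots (the
 * code and pc words are added on top), and a 64-bit value consumes two
 * of those slots.
 */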

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}
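
/*
 * oprofile_add_trace() is meant to be called once per frame from the
 * architecture's oprofile_ops.backtrace() callback, while tracing is
 * enabled by __oprofile_add_ext_sample() above. A minimal sketch
 * (hypothetical callback and frame walk, not from this file):
 *
 *	static void my_backtrace(struct pt_regs * const regs,
 *				 unsigned int depth)
 *	{
 *		while (depth-- && frame_is_valid(regs))
 *			oprofile_add_trace(frame_return_address(regs));
 *	}
 */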

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);

	if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
		cancel_delayed_work(&b->work);
		return;
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}