// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <trace/hooks/ftrace_dump.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring-buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#endif

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};
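
/*
 * Illustrative layout (an assumption for clarity, not part of the original
 * comment): a module that registers three eval maps would be saved as:
 *
 *	[0] head: { .mod = <module>, .length = 3 }
 *	[1] map
 *	[2] map
 *	[3] map
 *	[4] tail: { .next = <next saved array, or NULL> }
 */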

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	/* Ignore the "tp_printk_stop_on_boot" param */
	if (*str == '_')
		return 0;

	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

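/* Convert nanoseconds to microseconds, rounding to the nearest microsecond. */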
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

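/*
 * Hand @event to every registered export whose flags include @flag. The
 * export list is published with rcu_assign_pointer() and walked here with
 * preemption disabled, so an export must not sleep in its ->write() callback.
 */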
static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are adding export to the list, but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included in the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);

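/*
 * Illustrative sketch (not part of the original file): a minimal export that
 * receives raw function trace entries. The names my_export_write() and
 * my_export are hypothetical.
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		pr_debug("exporting %u byte trace entry\n", size);
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write = my_export_write,
 *		.flags = TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */
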
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

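/*
 * trace_array_get - take a reference on a trace array
 * @this_tr: The trace array to take a reference on
 *
 * Returns 0 if @this_tr is found on ftrace_trace_arrays and its reference
 * count was incremented, or -ENODEV otherwise.
 */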
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

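/*
 * Common checks for opening a tracefs file: fail if tracefs is locked down
 * or tracing has been permanently disabled, and take a reference on @tr
 * when one is supplied.
 */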
int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids that should not be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
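
/*
 * Illustrative example (not from the original source): user space writing
 * "123 456" through an interface backed by this helper produces a new
 * pid_list with bits 123 and 456 set, in addition to any pids carried
 * over from the previous filtered_pids list.
 */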

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

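/*
 * Return the current trace timestamp for @cpu, taken from the top-level
 * (global_trace) ring buffer.
 */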
u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level protection.
 * The validity of the events (returned by ring_buffer_peek(), etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different per-cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

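/*
 * Reserve space for an event of @len bytes on @buffer and fill in the
 * common trace_entry header fields. Returns NULL if the reservation
 * fails (e.g. the ring buffer is full or recording is disabled).
 */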
static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int ret = 0;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

1142*4882a593Smuzhiyun /**
1143*4882a593Smuzhiyun * tracing_snapshot - take a snapshot of the current buffer.
1144*4882a593Smuzhiyun *
1145*4882a593Smuzhiyun * This causes a swap between the snapshot buffer and the current live
1146*4882a593Smuzhiyun * tracing buffer. You can use this to take snapshots of the live
1147*4882a593Smuzhiyun * trace when some condition is triggered, but continue to trace.
1148*4882a593Smuzhiyun *
1149*4882a593Smuzhiyun  * Note, make sure to allocate the snapshot with either
1150*4882a593Smuzhiyun  * tracing_snapshot_alloc(), or by doing it manually
1151*4882a593Smuzhiyun  * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1152*4882a593Smuzhiyun  *
1153*4882a593Smuzhiyun  * If the snapshot buffer is not allocated, this will stop tracing,
1154*4882a593Smuzhiyun  * basically making a permanent snapshot.
1155*4882a593Smuzhiyun */
1156*4882a593Smuzhiyun void tracing_snapshot(void)
1157*4882a593Smuzhiyun {
1158*4882a593Smuzhiyun struct trace_array *tr = &global_trace;
1159*4882a593Smuzhiyun
1160*4882a593Smuzhiyun tracing_snapshot_instance(tr);
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_snapshot);
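
/*
 * Example (illustrative only, not part of this file): a driver that has
 * already allocated the snapshot buffer can capture one when it detects a
 * problem. The function name below is hypothetical; tracing_snapshot()
 * does not sleep, but as noted above it is ignored in NMI context and
 * stops tracing if no snapshot buffer was allocated.
 *
 *	static void my_driver_detected_stall(void)
 *	{
 *		tracing_snapshot();
 *	}
 */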
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun /**
1165*4882a593Smuzhiyun * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1166*4882a593Smuzhiyun * @tr: The tracing instance to snapshot
1167*4882a593Smuzhiyun * @cond_data: The data to be tested conditionally, and possibly saved
1168*4882a593Smuzhiyun *
1169*4882a593Smuzhiyun  * This is the same as tracing_snapshot() except that the snapshot is
1170*4882a593Smuzhiyun  * conditional: the snapshot will only happen if the
1171*4882a593Smuzhiyun  * cond_snapshot.update() implementation receiving the cond_data
1172*4882a593Smuzhiyun  * returns true. In that case the trace array's cond_snapshot
1173*4882a593Smuzhiyun  * update() operation used the cond_data to determine whether the
1174*4882a593Smuzhiyun  * snapshot should be taken, and if it was, presumably saved it along
1175*4882a593Smuzhiyun  * with the snapshot.
1176*4882a593Smuzhiyun */
1177*4882a593Smuzhiyun void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1178*4882a593Smuzhiyun {
1179*4882a593Smuzhiyun tracing_snapshot_instance_cond(tr, cond_data);
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun /**
1184*4882a593Smuzhiyun * tracing_snapshot_cond_data - get the user data associated with a snapshot
1185*4882a593Smuzhiyun * @tr: The tracing instance
1186*4882a593Smuzhiyun *
1187*4882a593Smuzhiyun * When the user enables a conditional snapshot using
1188*4882a593Smuzhiyun * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1189*4882a593Smuzhiyun * with the snapshot. This accessor is used to retrieve it.
1190*4882a593Smuzhiyun *
1191*4882a593Smuzhiyun * Should not be called from cond_snapshot.update(), since it takes
1192*4882a593Smuzhiyun * the tr->max_lock lock, which the code calling
1193*4882a593Smuzhiyun  * cond_snapshot.update() has already taken.
1194*4882a593Smuzhiyun *
1195*4882a593Smuzhiyun * Returns the cond_data associated with the trace array's snapshot.
1196*4882a593Smuzhiyun */
1197*4882a593Smuzhiyun void *tracing_cond_snapshot_data(struct trace_array *tr)
1198*4882a593Smuzhiyun {
1199*4882a593Smuzhiyun void *cond_data = NULL;
1200*4882a593Smuzhiyun
1201*4882a593Smuzhiyun local_irq_disable();
1202*4882a593Smuzhiyun arch_spin_lock(&tr->max_lock);
1203*4882a593Smuzhiyun
1204*4882a593Smuzhiyun if (tr->cond_snapshot)
1205*4882a593Smuzhiyun cond_data = tr->cond_snapshot->cond_data;
1206*4882a593Smuzhiyun
1207*4882a593Smuzhiyun arch_spin_unlock(&tr->max_lock);
1208*4882a593Smuzhiyun local_irq_enable();
1209*4882a593Smuzhiyun
1210*4882a593Smuzhiyun return cond_data;
1211*4882a593Smuzhiyun }
1212*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1215*4882a593Smuzhiyun struct array_buffer *size_buf, int cpu_id);
1216*4882a593Smuzhiyun static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1217*4882a593Smuzhiyun
1218*4882a593Smuzhiyun int tracing_alloc_snapshot_instance(struct trace_array *tr)
1219*4882a593Smuzhiyun {
1220*4882a593Smuzhiyun int ret;
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun if (!tr->allocated_snapshot) {
1223*4882a593Smuzhiyun
1224*4882a593Smuzhiyun /* allocate spare buffer */
1225*4882a593Smuzhiyun ret = resize_buffer_duplicate_size(&tr->max_buffer,
1226*4882a593Smuzhiyun &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1227*4882a593Smuzhiyun if (ret < 0)
1228*4882a593Smuzhiyun return ret;
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun tr->allocated_snapshot = true;
1231*4882a593Smuzhiyun }
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun return 0;
1234*4882a593Smuzhiyun }
1235*4882a593Smuzhiyun
1236*4882a593Smuzhiyun static void free_snapshot(struct trace_array *tr)
1237*4882a593Smuzhiyun {
1238*4882a593Smuzhiyun /*
1239*4882a593Smuzhiyun 	 * We don't free the ring buffer; instead, we resize it because
1240*4882a593Smuzhiyun 	 * the max_tr ring buffer has some state (e.g. ring->clock) and
1241*4882a593Smuzhiyun 	 * we want to preserve it.
1242*4882a593Smuzhiyun */
1243*4882a593Smuzhiyun ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1244*4882a593Smuzhiyun set_buffer_entries(&tr->max_buffer, 1);
1245*4882a593Smuzhiyun tracing_reset_online_cpus(&tr->max_buffer);
1246*4882a593Smuzhiyun tr->allocated_snapshot = false;
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun /**
1250*4882a593Smuzhiyun * tracing_alloc_snapshot - allocate snapshot buffer.
1251*4882a593Smuzhiyun *
1252*4882a593Smuzhiyun * This only allocates the snapshot buffer if it isn't already
1253*4882a593Smuzhiyun * allocated - it doesn't also take a snapshot.
1254*4882a593Smuzhiyun *
1255*4882a593Smuzhiyun * This is meant to be used in cases where the snapshot buffer needs
1256*4882a593Smuzhiyun * to be set up for events that can't sleep but need to be able to
1257*4882a593Smuzhiyun * trigger a snapshot.
1258*4882a593Smuzhiyun */
1259*4882a593Smuzhiyun int tracing_alloc_snapshot(void)
1260*4882a593Smuzhiyun {
1261*4882a593Smuzhiyun struct trace_array *tr = &global_trace;
1262*4882a593Smuzhiyun int ret;
1263*4882a593Smuzhiyun
1264*4882a593Smuzhiyun ret = tracing_alloc_snapshot_instance(tr);
1265*4882a593Smuzhiyun WARN_ON(ret < 0);
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun return ret;
1268*4882a593Smuzhiyun }
1269*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun /**
1272*4882a593Smuzhiyun * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1273*4882a593Smuzhiyun *
1274*4882a593Smuzhiyun * This is similar to tracing_snapshot(), but it will allocate the
1275*4882a593Smuzhiyun * snapshot buffer if it isn't already allocated. Use this only
1276*4882a593Smuzhiyun * where it is safe to sleep, as the allocation may sleep.
1277*4882a593Smuzhiyun *
1278*4882a593Smuzhiyun * This causes a swap between the snapshot buffer and the current live
1279*4882a593Smuzhiyun * tracing buffer. You can use this to take snapshots of the live
1280*4882a593Smuzhiyun * trace when some condition is triggered, but continue to trace.
1281*4882a593Smuzhiyun */
1282*4882a593Smuzhiyun void tracing_snapshot_alloc(void)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun int ret;
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun ret = tracing_alloc_snapshot();
1287*4882a593Smuzhiyun if (ret < 0)
1288*4882a593Smuzhiyun return;
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun tracing_snapshot();
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
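
/*
 * Sketch of the two usage patterns described above (caller names are
 * hypothetical): allocate up front from process context if the snapshot
 * may later be taken from atomic context, otherwise let
 * tracing_snapshot_alloc() do both steps in one sleepable call.
 *
 *	// Process context (may sleep): allocate now, snapshot later from IRQ.
 *	static int my_probe_init(void)
 *	{
 *		return tracing_alloc_snapshot();
 *	}
 *
 *	// Process context only: allocate (if needed) and snapshot in one go.
 *	static void my_checkpoint(void)
 *	{
 *		tracing_snapshot_alloc();
 *	}
 */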
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun /**
1295*4882a593Smuzhiyun * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1296*4882a593Smuzhiyun * @tr: The tracing instance
1297*4882a593Smuzhiyun * @cond_data: User data to associate with the snapshot
1298*4882a593Smuzhiyun * @update: Implementation of the cond_snapshot update function
1299*4882a593Smuzhiyun *
1300*4882a593Smuzhiyun * Check whether the conditional snapshot for the given instance has
1301*4882a593Smuzhiyun * already been enabled, or if the current tracer is already using a
1302*4882a593Smuzhiyun * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1303*4882a593Smuzhiyun * save the cond_data and update function inside.
1304*4882a593Smuzhiyun *
1305*4882a593Smuzhiyun * Returns 0 if successful, error otherwise.
1306*4882a593Smuzhiyun */
1307*4882a593Smuzhiyun int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1308*4882a593Smuzhiyun cond_update_fn_t update)
1309*4882a593Smuzhiyun {
1310*4882a593Smuzhiyun struct cond_snapshot *cond_snapshot;
1311*4882a593Smuzhiyun int ret = 0;
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1314*4882a593Smuzhiyun if (!cond_snapshot)
1315*4882a593Smuzhiyun return -ENOMEM;
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun cond_snapshot->cond_data = cond_data;
1318*4882a593Smuzhiyun cond_snapshot->update = update;
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun ret = tracing_alloc_snapshot_instance(tr);
1323*4882a593Smuzhiyun if (ret)
1324*4882a593Smuzhiyun goto fail_unlock;
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun if (tr->current_trace->use_max_tr) {
1327*4882a593Smuzhiyun ret = -EBUSY;
1328*4882a593Smuzhiyun goto fail_unlock;
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun /*
1332*4882a593Smuzhiyun * The cond_snapshot can only change to NULL without the
1333*4882a593Smuzhiyun * trace_types_lock. We don't care if we race with it going
1334*4882a593Smuzhiyun * to NULL, but we want to make sure that it's not set to
1335*4882a593Smuzhiyun * something other than NULL when we get here, which we can
1336*4882a593Smuzhiyun * do safely with only holding the trace_types_lock and not
1337*4882a593Smuzhiyun * having to take the max_lock.
1338*4882a593Smuzhiyun */
1339*4882a593Smuzhiyun if (tr->cond_snapshot) {
1340*4882a593Smuzhiyun ret = -EBUSY;
1341*4882a593Smuzhiyun goto fail_unlock;
1342*4882a593Smuzhiyun }
1343*4882a593Smuzhiyun
1344*4882a593Smuzhiyun local_irq_disable();
1345*4882a593Smuzhiyun arch_spin_lock(&tr->max_lock);
1346*4882a593Smuzhiyun tr->cond_snapshot = cond_snapshot;
1347*4882a593Smuzhiyun arch_spin_unlock(&tr->max_lock);
1348*4882a593Smuzhiyun local_irq_enable();
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun return ret;
1353*4882a593Smuzhiyun
1354*4882a593Smuzhiyun fail_unlock:
1355*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
1356*4882a593Smuzhiyun kfree(cond_snapshot);
1357*4882a593Smuzhiyun return ret;
1358*4882a593Smuzhiyun }
1359*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
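
/*
 * Minimal sketch of a conditional-snapshot user (all names hypothetical).
 * The update() callback is invoked from update_max_tr() under tr->max_lock,
 * so it must not sleep and must not call tracing_cond_snapshot_data().
 *
 *	struct my_cond { u64 threshold; u64 worst; };
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *c = cond_data;
 *
 *		return c->worst > c->threshold;	// true => take the snapshot
 *	}
 *
 *	// setup:    tracing_snapshot_cond_enable(tr, &my_cond_obj, my_update);
 *	// trigger:  tracing_snapshot_cond(tr, &my_cond_obj);
 *	// teardown: tracing_snapshot_cond_disable(tr);
 */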
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun /**
1362*4882a593Smuzhiyun * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1363*4882a593Smuzhiyun * @tr: The tracing instance
1364*4882a593Smuzhiyun *
1365*4882a593Smuzhiyun * Check whether the conditional snapshot for the given instance is
1366*4882a593Smuzhiyun * enabled; if so, free the cond_snapshot associated with it,
1367*4882a593Smuzhiyun * otherwise return -EINVAL.
1368*4882a593Smuzhiyun *
1369*4882a593Smuzhiyun * Returns 0 if successful, error otherwise.
1370*4882a593Smuzhiyun */
1371*4882a593Smuzhiyun int tracing_snapshot_cond_disable(struct trace_array *tr)
1372*4882a593Smuzhiyun {
1373*4882a593Smuzhiyun int ret = 0;
1374*4882a593Smuzhiyun
1375*4882a593Smuzhiyun local_irq_disable();
1376*4882a593Smuzhiyun arch_spin_lock(&tr->max_lock);
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun if (!tr->cond_snapshot)
1379*4882a593Smuzhiyun ret = -EINVAL;
1380*4882a593Smuzhiyun else {
1381*4882a593Smuzhiyun kfree(tr->cond_snapshot);
1382*4882a593Smuzhiyun tr->cond_snapshot = NULL;
1383*4882a593Smuzhiyun }
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun arch_spin_unlock(&tr->max_lock);
1386*4882a593Smuzhiyun local_irq_enable();
1387*4882a593Smuzhiyun
1388*4882a593Smuzhiyun return ret;
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1391*4882a593Smuzhiyun #else
1392*4882a593Smuzhiyun void tracing_snapshot(void)
1393*4882a593Smuzhiyun {
1394*4882a593Smuzhiyun WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1395*4882a593Smuzhiyun }
1396*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_snapshot);
1397*4882a593Smuzhiyun void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1402*4882a593Smuzhiyun int tracing_alloc_snapshot(void)
1403*4882a593Smuzhiyun {
1404*4882a593Smuzhiyun WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1405*4882a593Smuzhiyun return -ENODEV;
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1408*4882a593Smuzhiyun void tracing_snapshot_alloc(void)
1409*4882a593Smuzhiyun {
1410*4882a593Smuzhiyun /* Give warning */
1411*4882a593Smuzhiyun tracing_snapshot();
1412*4882a593Smuzhiyun }
1413*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1414*4882a593Smuzhiyun void *tracing_cond_snapshot_data(struct trace_array *tr)
1415*4882a593Smuzhiyun {
1416*4882a593Smuzhiyun return NULL;
1417*4882a593Smuzhiyun }
1418*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1419*4882a593Smuzhiyun int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1420*4882a593Smuzhiyun {
1421*4882a593Smuzhiyun return -ENODEV;
1422*4882a593Smuzhiyun }
1423*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1424*4882a593Smuzhiyun int tracing_snapshot_cond_disable(struct trace_array *tr)
1425*4882a593Smuzhiyun {
1426*4882a593Smuzhiyun return false;
1427*4882a593Smuzhiyun }
1428*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1429*4882a593Smuzhiyun #endif /* CONFIG_TRACER_SNAPSHOT */
1430*4882a593Smuzhiyun
1431*4882a593Smuzhiyun void tracer_tracing_off(struct trace_array *tr)
1432*4882a593Smuzhiyun {
1433*4882a593Smuzhiyun if (tr->array_buffer.buffer)
1434*4882a593Smuzhiyun ring_buffer_record_off(tr->array_buffer.buffer);
1435*4882a593Smuzhiyun /*
1436*4882a593Smuzhiyun * This flag is looked at when buffers haven't been allocated
1437*4882a593Smuzhiyun 	 * yet, or by some tracers (like irqsoff) that just want to
1438*4882a593Smuzhiyun 	 * know if the ring buffer has been disabled, but can handle
1439*4882a593Smuzhiyun 	 * races where it gets disabled while we still do a record.
1440*4882a593Smuzhiyun * As the check is in the fast path of the tracers, it is more
1441*4882a593Smuzhiyun * important to be fast than accurate.
1442*4882a593Smuzhiyun */
1443*4882a593Smuzhiyun tr->buffer_disabled = 1;
1444*4882a593Smuzhiyun /* Make the flag seen by readers */
1445*4882a593Smuzhiyun smp_wmb();
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun /**
1449*4882a593Smuzhiyun * tracing_off - turn off tracing buffers
1450*4882a593Smuzhiyun *
1451*4882a593Smuzhiyun * This function stops the tracing buffers from recording data.
1452*4882a593Smuzhiyun * It does not disable any overhead the tracers themselves may
1453*4882a593Smuzhiyun * be causing. This function simply causes all recording to
1454*4882a593Smuzhiyun * the ring buffers to fail.
1455*4882a593Smuzhiyun */
1456*4882a593Smuzhiyun void tracing_off(void)
1457*4882a593Smuzhiyun {
1458*4882a593Smuzhiyun tracer_tracing_off(&global_trace);
1459*4882a593Smuzhiyun }
1460*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_off);
1461*4882a593Smuzhiyun
1462*4882a593Smuzhiyun void disable_trace_on_warning(void)
1463*4882a593Smuzhiyun {
1464*4882a593Smuzhiyun if (__disable_trace_on_warning) {
1465*4882a593Smuzhiyun trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1466*4882a593Smuzhiyun "Disabling tracing due to warning\n");
1467*4882a593Smuzhiyun tracing_off();
1468*4882a593Smuzhiyun }
1469*4882a593Smuzhiyun }
1470*4882a593Smuzhiyun
1471*4882a593Smuzhiyun /**
1472*4882a593Smuzhiyun * tracer_tracing_is_on - show real state of ring buffer enabled
1473*4882a593Smuzhiyun  * @tr: the trace array to check if its ring buffer is enabled
1474*4882a593Smuzhiyun  *
1475*4882a593Smuzhiyun  * Shows the real state of the ring buffer: whether it is enabled or not.
1476*4882a593Smuzhiyun */
1477*4882a593Smuzhiyun bool tracer_tracing_is_on(struct trace_array *tr)
1478*4882a593Smuzhiyun {
1479*4882a593Smuzhiyun if (tr->array_buffer.buffer)
1480*4882a593Smuzhiyun return ring_buffer_record_is_on(tr->array_buffer.buffer);
1481*4882a593Smuzhiyun return !tr->buffer_disabled;
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun /**
1485*4882a593Smuzhiyun * tracing_is_on - show state of ring buffers enabled
1486*4882a593Smuzhiyun */
1487*4882a593Smuzhiyun int tracing_is_on(void)
1488*4882a593Smuzhiyun {
1489*4882a593Smuzhiyun return tracer_tracing_is_on(&global_trace);
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_is_on);
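
/*
 * Sketch (hypothetical caller): bracketing a suspect code path so the ring
 * buffer stops recording right after the event of interest, leaving the
 * trace that led up to it intact. tracing_on() is the counterpart defined
 * elsewhere; the same can be done from user space via
 * /sys/kernel/tracing/tracing_on.
 *
 *	static void my_suspect_path(void)
 *	{
 *		do_something_suspect();
 *		if (something_went_wrong())
 *			tracing_off();	// freeze the buffers for post-mortem
 *	}
 */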
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun static int __init set_buf_size(char *str)
1494*4882a593Smuzhiyun {
1495*4882a593Smuzhiyun unsigned long buf_size;
1496*4882a593Smuzhiyun
1497*4882a593Smuzhiyun if (!str)
1498*4882a593Smuzhiyun return 0;
1499*4882a593Smuzhiyun buf_size = memparse(str, &str);
1500*4882a593Smuzhiyun /*
1501*4882a593Smuzhiyun 	 * nr_entries cannot be zero and the startup
1502*4882a593Smuzhiyun * tests require some buffer space. Therefore
1503*4882a593Smuzhiyun * ensure we have at least 4096 bytes of buffer.
1504*4882a593Smuzhiyun */
1505*4882a593Smuzhiyun trace_buf_size = max(4096UL, buf_size);
1506*4882a593Smuzhiyun return 1;
1507*4882a593Smuzhiyun }
1508*4882a593Smuzhiyun __setup("trace_buf_size=", set_buf_size);
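
/*
 * Because the size goes through memparse(), the usual K/M/G suffixes work
 * on the kernel command line, e.g.:
 *
 *	trace_buf_size=1M	(one megabyte of buffer per CPU)
 *	trace_buf_size=16384	(16 KiB of buffer per CPU)
 *
 * Anything below 4096 bytes is silently raised to 4096 (see above).
 */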
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun static int __init set_tracing_thresh(char *str)
1511*4882a593Smuzhiyun {
1512*4882a593Smuzhiyun unsigned long threshold;
1513*4882a593Smuzhiyun int ret;
1514*4882a593Smuzhiyun
1515*4882a593Smuzhiyun if (!str)
1516*4882a593Smuzhiyun return 0;
1517*4882a593Smuzhiyun ret = kstrtoul(str, 0, &threshold);
1518*4882a593Smuzhiyun if (ret < 0)
1519*4882a593Smuzhiyun return 0;
1520*4882a593Smuzhiyun tracing_thresh = threshold * 1000;
1521*4882a593Smuzhiyun return 1;
1522*4882a593Smuzhiyun }
1523*4882a593Smuzhiyun __setup("tracing_thresh=", set_tracing_thresh);
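
/*
 * The command-line value is taken in microseconds and stored in
 * tracing_thresh as nanoseconds, e.g.:
 *
 *	tracing_thresh=100	(only record latencies above 100 usecs)
 */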
1524*4882a593Smuzhiyun
1525*4882a593Smuzhiyun unsigned long nsecs_to_usecs(unsigned long nsecs)
1526*4882a593Smuzhiyun {
1527*4882a593Smuzhiyun return nsecs / 1000;
1528*4882a593Smuzhiyun }
1529*4882a593Smuzhiyun
1530*4882a593Smuzhiyun /*
1531*4882a593Smuzhiyun * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1532*4882a593Smuzhiyun * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1533*4882a593Smuzhiyun * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1534*4882a593Smuzhiyun * of strings in the order that the evals (enum) were defined.
1535*4882a593Smuzhiyun */
1536*4882a593Smuzhiyun #undef C
1537*4882a593Smuzhiyun #define C(a, b) b
1538*4882a593Smuzhiyun
1539*4882a593Smuzhiyun /* These must match the bit positions in trace_iterator_flags */
1540*4882a593Smuzhiyun static const char *trace_options[] = {
1541*4882a593Smuzhiyun TRACE_FLAGS
1542*4882a593Smuzhiyun NULL
1543*4882a593Smuzhiyun };
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun static struct {
1546*4882a593Smuzhiyun u64 (*func)(void);
1547*4882a593Smuzhiyun const char *name;
1548*4882a593Smuzhiyun int in_ns; /* is this clock in nanoseconds? */
1549*4882a593Smuzhiyun } trace_clocks[] = {
1550*4882a593Smuzhiyun { trace_clock_local, "local", 1 },
1551*4882a593Smuzhiyun { trace_clock_global, "global", 1 },
1552*4882a593Smuzhiyun { trace_clock_counter, "counter", 0 },
1553*4882a593Smuzhiyun { trace_clock_jiffies, "uptime", 0 },
1554*4882a593Smuzhiyun { trace_clock, "perf", 1 },
1555*4882a593Smuzhiyun { ktime_get_mono_fast_ns, "mono", 1 },
1556*4882a593Smuzhiyun { ktime_get_raw_fast_ns, "mono_raw", 1 },
1557*4882a593Smuzhiyun { ktime_get_boot_fast_ns, "boot", 1 },
1558*4882a593Smuzhiyun ARCH_TRACE_CLOCKS
1559*4882a593Smuzhiyun };
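
/*
 * The selected clock can be changed at run time through tracefs, e.g.:
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo mono > /sys/kernel/tracing/trace_clock
 *
 * The bracketed name is the clock currently in use; the exact list depends
 * on the kernel configuration and ARCH_TRACE_CLOCKS.
 */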
1560*4882a593Smuzhiyun
1561*4882a593Smuzhiyun bool trace_clock_in_ns(struct trace_array *tr)
1562*4882a593Smuzhiyun {
1563*4882a593Smuzhiyun if (trace_clocks[tr->clock_id].in_ns)
1564*4882a593Smuzhiyun return true;
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun return false;
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun
1569*4882a593Smuzhiyun /*
1570*4882a593Smuzhiyun * trace_parser_get_init - gets the buffer for trace parser
1571*4882a593Smuzhiyun */
1572*4882a593Smuzhiyun int trace_parser_get_init(struct trace_parser *parser, int size)
1573*4882a593Smuzhiyun {
1574*4882a593Smuzhiyun memset(parser, 0, sizeof(*parser));
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun parser->buffer = kmalloc(size, GFP_KERNEL);
1577*4882a593Smuzhiyun if (!parser->buffer)
1578*4882a593Smuzhiyun return 1;
1579*4882a593Smuzhiyun
1580*4882a593Smuzhiyun parser->size = size;
1581*4882a593Smuzhiyun return 0;
1582*4882a593Smuzhiyun }
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun /*
1585*4882a593Smuzhiyun * trace_parser_put - frees the buffer for trace parser
1586*4882a593Smuzhiyun */
1587*4882a593Smuzhiyun void trace_parser_put(struct trace_parser *parser)
1588*4882a593Smuzhiyun {
1589*4882a593Smuzhiyun kfree(parser->buffer);
1590*4882a593Smuzhiyun parser->buffer = NULL;
1591*4882a593Smuzhiyun }
1592*4882a593Smuzhiyun
1593*4882a593Smuzhiyun /*
1594*4882a593Smuzhiyun * trace_get_user - reads the user input string separated by space
1595*4882a593Smuzhiyun * (matched by isspace(ch))
1596*4882a593Smuzhiyun *
1597*4882a593Smuzhiyun * For each string found the 'struct trace_parser' is updated,
1598*4882a593Smuzhiyun * and the function returns.
1599*4882a593Smuzhiyun *
1600*4882a593Smuzhiyun * Returns number of bytes read.
1601*4882a593Smuzhiyun *
1602*4882a593Smuzhiyun * See kernel/trace/trace.h for 'struct trace_parser' details.
1603*4882a593Smuzhiyun */
1604*4882a593Smuzhiyun int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1605*4882a593Smuzhiyun size_t cnt, loff_t *ppos)
1606*4882a593Smuzhiyun {
1607*4882a593Smuzhiyun char ch;
1608*4882a593Smuzhiyun size_t read = 0;
1609*4882a593Smuzhiyun ssize_t ret;
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun if (!*ppos)
1612*4882a593Smuzhiyun trace_parser_clear(parser);
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun ret = get_user(ch, ubuf++);
1615*4882a593Smuzhiyun if (ret)
1616*4882a593Smuzhiyun goto out;
1617*4882a593Smuzhiyun
1618*4882a593Smuzhiyun read++;
1619*4882a593Smuzhiyun cnt--;
1620*4882a593Smuzhiyun
1621*4882a593Smuzhiyun /*
1622*4882a593Smuzhiyun 	 * If the parser hasn't finished with the last write,
1623*4882a593Smuzhiyun * continue reading the user input without skipping spaces.
1624*4882a593Smuzhiyun */
1625*4882a593Smuzhiyun if (!parser->cont) {
1626*4882a593Smuzhiyun /* skip white space */
1627*4882a593Smuzhiyun while (cnt && isspace(ch)) {
1628*4882a593Smuzhiyun ret = get_user(ch, ubuf++);
1629*4882a593Smuzhiyun if (ret)
1630*4882a593Smuzhiyun goto out;
1631*4882a593Smuzhiyun read++;
1632*4882a593Smuzhiyun cnt--;
1633*4882a593Smuzhiyun }
1634*4882a593Smuzhiyun
1635*4882a593Smuzhiyun parser->idx = 0;
1636*4882a593Smuzhiyun
1637*4882a593Smuzhiyun /* only spaces were written */
1638*4882a593Smuzhiyun if (isspace(ch) || !ch) {
1639*4882a593Smuzhiyun *ppos += read;
1640*4882a593Smuzhiyun ret = read;
1641*4882a593Smuzhiyun goto out;
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun }
1644*4882a593Smuzhiyun
1645*4882a593Smuzhiyun /* read the non-space input */
1646*4882a593Smuzhiyun while (cnt && !isspace(ch) && ch) {
1647*4882a593Smuzhiyun if (parser->idx < parser->size - 1)
1648*4882a593Smuzhiyun parser->buffer[parser->idx++] = ch;
1649*4882a593Smuzhiyun else {
1650*4882a593Smuzhiyun ret = -EINVAL;
1651*4882a593Smuzhiyun goto out;
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun ret = get_user(ch, ubuf++);
1654*4882a593Smuzhiyun if (ret)
1655*4882a593Smuzhiyun goto out;
1656*4882a593Smuzhiyun read++;
1657*4882a593Smuzhiyun cnt--;
1658*4882a593Smuzhiyun }
1659*4882a593Smuzhiyun
1660*4882a593Smuzhiyun /* We either got finished input or we have to wait for another call. */
1661*4882a593Smuzhiyun if (isspace(ch) || !ch) {
1662*4882a593Smuzhiyun parser->buffer[parser->idx] = 0;
1663*4882a593Smuzhiyun parser->cont = false;
1664*4882a593Smuzhiyun } else if (parser->idx < parser->size - 1) {
1665*4882a593Smuzhiyun parser->cont = true;
1666*4882a593Smuzhiyun parser->buffer[parser->idx++] = ch;
1667*4882a593Smuzhiyun /* Make sure the parsed string always terminates with '\0'. */
1668*4882a593Smuzhiyun parser->buffer[parser->idx] = 0;
1669*4882a593Smuzhiyun } else {
1670*4882a593Smuzhiyun ret = -EINVAL;
1671*4882a593Smuzhiyun goto out;
1672*4882a593Smuzhiyun }
1673*4882a593Smuzhiyun
1674*4882a593Smuzhiyun *ppos += read;
1675*4882a593Smuzhiyun ret = read;
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyun out:
1678*4882a593Smuzhiyun return ret;
1679*4882a593Smuzhiyun }
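
/*
 * Sketch of a typical caller (hypothetical file handler): each call pulls
 * one whitespace-separated token from the user buffer, so a single write
 * of "func_a func_b" yields "func_a" on one call and "func_b" on the next,
 * while parser.cont carries a token that was split across writes.
 *
 *	static ssize_t my_write(struct file *f, const char __user *ubuf,
 *				size_t cnt, loff_t *ppos)
 *	{
 *		struct trace_parser parser;
 *		ssize_t ret;
 *
 *		if (trace_parser_get_init(&parser, 64))
 *			return -ENOMEM;
 *		ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *		if (ret >= 0 && trace_parser_loaded(&parser))
 *			my_handle_token(parser.buffer);
 *		trace_parser_put(&parser);
 *		return ret;
 *	}
 */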
1680*4882a593Smuzhiyun
1681*4882a593Smuzhiyun /* TODO add a seq_buf_to_buffer() */
1682*4882a593Smuzhiyun static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1683*4882a593Smuzhiyun {
1684*4882a593Smuzhiyun int len;
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun if (trace_seq_used(s) <= s->seq.readpos)
1687*4882a593Smuzhiyun return -EBUSY;
1688*4882a593Smuzhiyun
1689*4882a593Smuzhiyun len = trace_seq_used(s) - s->seq.readpos;
1690*4882a593Smuzhiyun if (cnt > len)
1691*4882a593Smuzhiyun cnt = len;
1692*4882a593Smuzhiyun memcpy(buf, s->buffer + s->seq.readpos, cnt);
1693*4882a593Smuzhiyun
1694*4882a593Smuzhiyun s->seq.readpos += cnt;
1695*4882a593Smuzhiyun return cnt;
1696*4882a593Smuzhiyun }
1697*4882a593Smuzhiyun
1698*4882a593Smuzhiyun unsigned long __read_mostly tracing_thresh;
1699*4882a593Smuzhiyun static const struct file_operations tracing_max_lat_fops;
1700*4882a593Smuzhiyun
1701*4882a593Smuzhiyun #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1702*4882a593Smuzhiyun defined(CONFIG_FSNOTIFY)
1703*4882a593Smuzhiyun
1704*4882a593Smuzhiyun static struct workqueue_struct *fsnotify_wq;
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun static void latency_fsnotify_workfn(struct work_struct *work)
1707*4882a593Smuzhiyun {
1708*4882a593Smuzhiyun struct trace_array *tr = container_of(work, struct trace_array,
1709*4882a593Smuzhiyun fsnotify_work);
1710*4882a593Smuzhiyun fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1711*4882a593Smuzhiyun }
1712*4882a593Smuzhiyun
1713*4882a593Smuzhiyun static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1714*4882a593Smuzhiyun {
1715*4882a593Smuzhiyun struct trace_array *tr = container_of(iwork, struct trace_array,
1716*4882a593Smuzhiyun fsnotify_irqwork);
1717*4882a593Smuzhiyun queue_work(fsnotify_wq, &tr->fsnotify_work);
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun
1720*4882a593Smuzhiyun static void trace_create_maxlat_file(struct trace_array *tr,
1721*4882a593Smuzhiyun struct dentry *d_tracer)
1722*4882a593Smuzhiyun {
1723*4882a593Smuzhiyun INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1724*4882a593Smuzhiyun init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1725*4882a593Smuzhiyun tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1726*4882a593Smuzhiyun d_tracer, &tr->max_latency,
1727*4882a593Smuzhiyun &tracing_max_lat_fops);
1728*4882a593Smuzhiyun }
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun __init static int latency_fsnotify_init(void)
1731*4882a593Smuzhiyun {
1732*4882a593Smuzhiyun fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1733*4882a593Smuzhiyun WQ_UNBOUND | WQ_HIGHPRI, 0);
1734*4882a593Smuzhiyun if (!fsnotify_wq) {
1735*4882a593Smuzhiyun pr_err("Unable to allocate tr_max_lat_wq\n");
1736*4882a593Smuzhiyun return -ENOMEM;
1737*4882a593Smuzhiyun }
1738*4882a593Smuzhiyun return 0;
1739*4882a593Smuzhiyun }
1740*4882a593Smuzhiyun
1741*4882a593Smuzhiyun late_initcall_sync(latency_fsnotify_init);
1742*4882a593Smuzhiyun
1743*4882a593Smuzhiyun void latency_fsnotify(struct trace_array *tr)
1744*4882a593Smuzhiyun {
1745*4882a593Smuzhiyun if (!fsnotify_wq)
1746*4882a593Smuzhiyun return;
1747*4882a593Smuzhiyun /*
1748*4882a593Smuzhiyun * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1749*4882a593Smuzhiyun * possible that we are called from __schedule() or do_idle(), which
1750*4882a593Smuzhiyun * could cause a deadlock.
1751*4882a593Smuzhiyun */
1752*4882a593Smuzhiyun irq_work_queue(&tr->fsnotify_irqwork);
1753*4882a593Smuzhiyun }
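
/*
 * The FS_MODIFY event generated above is what lets user space sit in
 * inotify/poll on the tracing_max_latency file instead of re-reading it in
 * a loop. A minimal, illustrative user-space watcher:
 *
 *	int fd = inotify_init1(0);
 *	inotify_add_watch(fd, "/sys/kernel/tracing/tracing_max_latency",
 *			  IN_MODIFY);
 *	// read(fd, ...) now blocks until a new max latency is recorded.
 */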
1754*4882a593Smuzhiyun
1755*4882a593Smuzhiyun /*
1756*4882a593Smuzhiyun * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1757*4882a593Smuzhiyun * defined(CONFIG_FSNOTIFY)
1758*4882a593Smuzhiyun */
1759*4882a593Smuzhiyun #else
1760*4882a593Smuzhiyun
1761*4882a593Smuzhiyun #define trace_create_maxlat_file(tr, d_tracer) \
1762*4882a593Smuzhiyun trace_create_file("tracing_max_latency", 0644, d_tracer, \
1763*4882a593Smuzhiyun &tr->max_latency, &tracing_max_lat_fops)
1764*4882a593Smuzhiyun
1765*4882a593Smuzhiyun #endif
1766*4882a593Smuzhiyun
1767*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
1768*4882a593Smuzhiyun /*
1769*4882a593Smuzhiyun * Copy the new maximum trace into the separate maximum-trace
1770*4882a593Smuzhiyun * structure. (this way the maximum trace is permanently saved,
1771*4882a593Smuzhiyun * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1772*4882a593Smuzhiyun */
1773*4882a593Smuzhiyun static void
1774*4882a593Smuzhiyun __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1775*4882a593Smuzhiyun {
1776*4882a593Smuzhiyun struct array_buffer *trace_buf = &tr->array_buffer;
1777*4882a593Smuzhiyun struct array_buffer *max_buf = &tr->max_buffer;
1778*4882a593Smuzhiyun struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1779*4882a593Smuzhiyun struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1780*4882a593Smuzhiyun
1781*4882a593Smuzhiyun max_buf->cpu = cpu;
1782*4882a593Smuzhiyun max_buf->time_start = data->preempt_timestamp;
1783*4882a593Smuzhiyun
1784*4882a593Smuzhiyun max_data->saved_latency = tr->max_latency;
1785*4882a593Smuzhiyun max_data->critical_start = data->critical_start;
1786*4882a593Smuzhiyun max_data->critical_end = data->critical_end;
1787*4882a593Smuzhiyun
1788*4882a593Smuzhiyun strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1789*4882a593Smuzhiyun max_data->pid = tsk->pid;
1790*4882a593Smuzhiyun /*
1791*4882a593Smuzhiyun * If tsk == current, then use current_uid(), as that does not use
1792*4882a593Smuzhiyun * RCU. The irq tracer can be called out of RCU scope.
1793*4882a593Smuzhiyun */
1794*4882a593Smuzhiyun if (tsk == current)
1795*4882a593Smuzhiyun max_data->uid = current_uid();
1796*4882a593Smuzhiyun else
1797*4882a593Smuzhiyun max_data->uid = task_uid(tsk);
1798*4882a593Smuzhiyun
1799*4882a593Smuzhiyun max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1800*4882a593Smuzhiyun max_data->policy = tsk->policy;
1801*4882a593Smuzhiyun max_data->rt_priority = tsk->rt_priority;
1802*4882a593Smuzhiyun
1803*4882a593Smuzhiyun 	/* record this task's comm */
1804*4882a593Smuzhiyun tracing_record_cmdline(tsk);
1805*4882a593Smuzhiyun latency_fsnotify(tr);
1806*4882a593Smuzhiyun }
1807*4882a593Smuzhiyun
1808*4882a593Smuzhiyun /**
1809*4882a593Smuzhiyun * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1810*4882a593Smuzhiyun * @tr: tracer
1811*4882a593Smuzhiyun * @tsk: the task with the latency
1812*4882a593Smuzhiyun * @cpu: The cpu that initiated the trace.
1813*4882a593Smuzhiyun * @cond_data: User data associated with a conditional snapshot
1814*4882a593Smuzhiyun *
1815*4882a593Smuzhiyun * Flip the buffers between the @tr and the max_tr and record information
1816*4882a593Smuzhiyun * about which task was the cause of this latency.
1817*4882a593Smuzhiyun */
1818*4882a593Smuzhiyun void
1819*4882a593Smuzhiyun update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1820*4882a593Smuzhiyun void *cond_data)
1821*4882a593Smuzhiyun {
1822*4882a593Smuzhiyun if (tr->stop_count)
1823*4882a593Smuzhiyun return;
1824*4882a593Smuzhiyun
1825*4882a593Smuzhiyun WARN_ON_ONCE(!irqs_disabled());
1826*4882a593Smuzhiyun
1827*4882a593Smuzhiyun if (!tr->allocated_snapshot) {
1828*4882a593Smuzhiyun /* Only the nop tracer should hit this when disabling */
1829*4882a593Smuzhiyun WARN_ON_ONCE(tr->current_trace != &nop_trace);
1830*4882a593Smuzhiyun return;
1831*4882a593Smuzhiyun }
1832*4882a593Smuzhiyun
1833*4882a593Smuzhiyun arch_spin_lock(&tr->max_lock);
1834*4882a593Smuzhiyun
1835*4882a593Smuzhiyun /* Inherit the recordable setting from array_buffer */
1836*4882a593Smuzhiyun if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1837*4882a593Smuzhiyun ring_buffer_record_on(tr->max_buffer.buffer);
1838*4882a593Smuzhiyun else
1839*4882a593Smuzhiyun ring_buffer_record_off(tr->max_buffer.buffer);
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun #ifdef CONFIG_TRACER_SNAPSHOT
1842*4882a593Smuzhiyun if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1843*4882a593Smuzhiyun goto out_unlock;
1844*4882a593Smuzhiyun #endif
1845*4882a593Smuzhiyun swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun __update_max_tr(tr, tsk, cpu);
1848*4882a593Smuzhiyun
1849*4882a593Smuzhiyun out_unlock:
1850*4882a593Smuzhiyun arch_spin_unlock(&tr->max_lock);
1851*4882a593Smuzhiyun }
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun /**
1854*4882a593Smuzhiyun * update_max_tr_single - only copy one trace over, and reset the rest
1855*4882a593Smuzhiyun * @tr: tracer
1856*4882a593Smuzhiyun * @tsk: task with the latency
1857*4882a593Smuzhiyun * @cpu: the cpu of the buffer to copy.
1858*4882a593Smuzhiyun *
1859*4882a593Smuzhiyun * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1860*4882a593Smuzhiyun */
1861*4882a593Smuzhiyun void
1862*4882a593Smuzhiyun update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1863*4882a593Smuzhiyun {
1864*4882a593Smuzhiyun int ret;
1865*4882a593Smuzhiyun
1866*4882a593Smuzhiyun if (tr->stop_count)
1867*4882a593Smuzhiyun return;
1868*4882a593Smuzhiyun
1869*4882a593Smuzhiyun WARN_ON_ONCE(!irqs_disabled());
1870*4882a593Smuzhiyun if (!tr->allocated_snapshot) {
1871*4882a593Smuzhiyun /* Only the nop tracer should hit this when disabling */
1872*4882a593Smuzhiyun WARN_ON_ONCE(tr->current_trace != &nop_trace);
1873*4882a593Smuzhiyun return;
1874*4882a593Smuzhiyun }
1875*4882a593Smuzhiyun
1876*4882a593Smuzhiyun arch_spin_lock(&tr->max_lock);
1877*4882a593Smuzhiyun
1878*4882a593Smuzhiyun ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1879*4882a593Smuzhiyun
1880*4882a593Smuzhiyun if (ret == -EBUSY) {
1881*4882a593Smuzhiyun /*
1882*4882a593Smuzhiyun * We failed to swap the buffer due to a commit taking
1883*4882a593Smuzhiyun * place on this CPU. We fail to record, but we reset
1884*4882a593Smuzhiyun * the max trace buffer (no one writes directly to it)
1885*4882a593Smuzhiyun * and flag that it failed.
1886*4882a593Smuzhiyun */
1887*4882a593Smuzhiyun trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1888*4882a593Smuzhiyun "Failed to swap buffers due to commit in progress\n");
1889*4882a593Smuzhiyun }
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1892*4882a593Smuzhiyun
1893*4882a593Smuzhiyun __update_max_tr(tr, tsk, cpu);
1894*4882a593Smuzhiyun arch_spin_unlock(&tr->max_lock);
1895*4882a593Smuzhiyun }
1896*4882a593Smuzhiyun #endif /* CONFIG_TRACER_MAX_TRACE */
1897*4882a593Smuzhiyun
1898*4882a593Smuzhiyun static int wait_on_pipe(struct trace_iterator *iter, int full)
1899*4882a593Smuzhiyun {
1900*4882a593Smuzhiyun /* Iterators are static, they should be filled or empty */
1901*4882a593Smuzhiyun if (trace_buffer_iter(iter, iter->cpu_file))
1902*4882a593Smuzhiyun return 0;
1903*4882a593Smuzhiyun
1904*4882a593Smuzhiyun return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1905*4882a593Smuzhiyun full);
1906*4882a593Smuzhiyun }
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyun #ifdef CONFIG_FTRACE_STARTUP_TEST
1909*4882a593Smuzhiyun static bool selftests_can_run;
1910*4882a593Smuzhiyun
1911*4882a593Smuzhiyun struct trace_selftests {
1912*4882a593Smuzhiyun struct list_head list;
1913*4882a593Smuzhiyun struct tracer *type;
1914*4882a593Smuzhiyun };
1915*4882a593Smuzhiyun
1916*4882a593Smuzhiyun static LIST_HEAD(postponed_selftests);
1917*4882a593Smuzhiyun
1918*4882a593Smuzhiyun static int save_selftest(struct tracer *type)
1919*4882a593Smuzhiyun {
1920*4882a593Smuzhiyun struct trace_selftests *selftest;
1921*4882a593Smuzhiyun
1922*4882a593Smuzhiyun selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1923*4882a593Smuzhiyun if (!selftest)
1924*4882a593Smuzhiyun return -ENOMEM;
1925*4882a593Smuzhiyun
1926*4882a593Smuzhiyun selftest->type = type;
1927*4882a593Smuzhiyun list_add(&selftest->list, &postponed_selftests);
1928*4882a593Smuzhiyun return 0;
1929*4882a593Smuzhiyun }
1930*4882a593Smuzhiyun
1931*4882a593Smuzhiyun static int run_tracer_selftest(struct tracer *type)
1932*4882a593Smuzhiyun {
1933*4882a593Smuzhiyun struct trace_array *tr = &global_trace;
1934*4882a593Smuzhiyun struct tracer *saved_tracer = tr->current_trace;
1935*4882a593Smuzhiyun int ret;
1936*4882a593Smuzhiyun
1937*4882a593Smuzhiyun if (!type->selftest || tracing_selftest_disabled)
1938*4882a593Smuzhiyun return 0;
1939*4882a593Smuzhiyun
1940*4882a593Smuzhiyun /*
1941*4882a593Smuzhiyun * If a tracer registers early in boot up (before scheduling is
1942*4882a593Smuzhiyun * initialized and such), then do not run its selftests yet.
1943*4882a593Smuzhiyun * Instead, run it a little later in the boot process.
1944*4882a593Smuzhiyun */
1945*4882a593Smuzhiyun if (!selftests_can_run)
1946*4882a593Smuzhiyun return save_selftest(type);
1947*4882a593Smuzhiyun
1948*4882a593Smuzhiyun /*
1949*4882a593Smuzhiyun * Run a selftest on this tracer.
1950*4882a593Smuzhiyun * Here we reset the trace buffer, and set the current
1951*4882a593Smuzhiyun * tracer to be this tracer. The tracer can then run some
1952*4882a593Smuzhiyun * internal tracing to verify that everything is in order.
1953*4882a593Smuzhiyun * If we fail, we do not register this tracer.
1954*4882a593Smuzhiyun */
1955*4882a593Smuzhiyun tracing_reset_online_cpus(&tr->array_buffer);
1956*4882a593Smuzhiyun
1957*4882a593Smuzhiyun tr->current_trace = type;
1958*4882a593Smuzhiyun
1959*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
1960*4882a593Smuzhiyun if (type->use_max_tr) {
1961*4882a593Smuzhiyun /* If we expanded the buffers, make sure the max is expanded too */
1962*4882a593Smuzhiyun if (ring_buffer_expanded)
1963*4882a593Smuzhiyun ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1964*4882a593Smuzhiyun RING_BUFFER_ALL_CPUS);
1965*4882a593Smuzhiyun tr->allocated_snapshot = true;
1966*4882a593Smuzhiyun }
1967*4882a593Smuzhiyun #endif
1968*4882a593Smuzhiyun
1969*4882a593Smuzhiyun /* the test is responsible for initializing and enabling */
1970*4882a593Smuzhiyun pr_info("Testing tracer %s: ", type->name);
1971*4882a593Smuzhiyun ret = type->selftest(type, tr);
1972*4882a593Smuzhiyun /* the test is responsible for resetting too */
1973*4882a593Smuzhiyun tr->current_trace = saved_tracer;
1974*4882a593Smuzhiyun if (ret) {
1975*4882a593Smuzhiyun printk(KERN_CONT "FAILED!\n");
1976*4882a593Smuzhiyun /* Add the warning after printing 'FAILED' */
1977*4882a593Smuzhiyun WARN_ON(1);
1978*4882a593Smuzhiyun return -1;
1979*4882a593Smuzhiyun }
1980*4882a593Smuzhiyun /* Only reset on passing, to avoid touching corrupted buffers */
1981*4882a593Smuzhiyun tracing_reset_online_cpus(&tr->array_buffer);
1982*4882a593Smuzhiyun
1983*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
1984*4882a593Smuzhiyun if (type->use_max_tr) {
1985*4882a593Smuzhiyun tr->allocated_snapshot = false;
1986*4882a593Smuzhiyun
1987*4882a593Smuzhiyun /* Shrink the max buffer again */
1988*4882a593Smuzhiyun if (ring_buffer_expanded)
1989*4882a593Smuzhiyun ring_buffer_resize(tr->max_buffer.buffer, 1,
1990*4882a593Smuzhiyun RING_BUFFER_ALL_CPUS);
1991*4882a593Smuzhiyun }
1992*4882a593Smuzhiyun #endif
1993*4882a593Smuzhiyun
1994*4882a593Smuzhiyun printk(KERN_CONT "PASSED\n");
1995*4882a593Smuzhiyun return 0;
1996*4882a593Smuzhiyun }
1997*4882a593Smuzhiyun
1998*4882a593Smuzhiyun static __init int init_trace_selftests(void)
1999*4882a593Smuzhiyun {
2000*4882a593Smuzhiyun struct trace_selftests *p, *n;
2001*4882a593Smuzhiyun struct tracer *t, **last;
2002*4882a593Smuzhiyun int ret;
2003*4882a593Smuzhiyun
2004*4882a593Smuzhiyun selftests_can_run = true;
2005*4882a593Smuzhiyun
2006*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
2007*4882a593Smuzhiyun
2008*4882a593Smuzhiyun if (list_empty(&postponed_selftests))
2009*4882a593Smuzhiyun goto out;
2010*4882a593Smuzhiyun
2011*4882a593Smuzhiyun pr_info("Running postponed tracer tests:\n");
2012*4882a593Smuzhiyun
2013*4882a593Smuzhiyun tracing_selftest_running = true;
2014*4882a593Smuzhiyun list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2015*4882a593Smuzhiyun /* This loop can take minutes when sanitizers are enabled, so
2016*4882a593Smuzhiyun 		 * let's make sure we allow RCU processing.
2017*4882a593Smuzhiyun */
2018*4882a593Smuzhiyun cond_resched();
2019*4882a593Smuzhiyun ret = run_tracer_selftest(p->type);
2020*4882a593Smuzhiyun /* If the test fails, then warn and remove from available_tracers */
2021*4882a593Smuzhiyun if (ret < 0) {
2022*4882a593Smuzhiyun WARN(1, "tracer: %s failed selftest, disabling\n",
2023*4882a593Smuzhiyun p->type->name);
2024*4882a593Smuzhiyun last = &trace_types;
2025*4882a593Smuzhiyun for (t = trace_types; t; t = t->next) {
2026*4882a593Smuzhiyun if (t == p->type) {
2027*4882a593Smuzhiyun *last = t->next;
2028*4882a593Smuzhiyun break;
2029*4882a593Smuzhiyun }
2030*4882a593Smuzhiyun last = &t->next;
2031*4882a593Smuzhiyun }
2032*4882a593Smuzhiyun }
2033*4882a593Smuzhiyun list_del(&p->list);
2034*4882a593Smuzhiyun kfree(p);
2035*4882a593Smuzhiyun }
2036*4882a593Smuzhiyun tracing_selftest_running = false;
2037*4882a593Smuzhiyun
2038*4882a593Smuzhiyun out:
2039*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
2040*4882a593Smuzhiyun
2041*4882a593Smuzhiyun return 0;
2042*4882a593Smuzhiyun }
2043*4882a593Smuzhiyun core_initcall(init_trace_selftests);
2044*4882a593Smuzhiyun #else
2045*4882a593Smuzhiyun static inline int run_tracer_selftest(struct tracer *type)
2046*4882a593Smuzhiyun {
2047*4882a593Smuzhiyun return 0;
2048*4882a593Smuzhiyun }
2049*4882a593Smuzhiyun #endif /* CONFIG_FTRACE_STARTUP_TEST */
2050*4882a593Smuzhiyun
2051*4882a593Smuzhiyun static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2052*4882a593Smuzhiyun
2053*4882a593Smuzhiyun static void __init apply_trace_boot_options(void);
2054*4882a593Smuzhiyun
2055*4882a593Smuzhiyun /**
2056*4882a593Smuzhiyun * register_tracer - register a tracer with the ftrace system.
2057*4882a593Smuzhiyun * @type: the plugin for the tracer
2058*4882a593Smuzhiyun *
2059*4882a593Smuzhiyun * Register a new plugin tracer.
2060*4882a593Smuzhiyun */
2061*4882a593Smuzhiyun int __init register_tracer(struct tracer *type)
2062*4882a593Smuzhiyun {
2063*4882a593Smuzhiyun struct tracer *t;
2064*4882a593Smuzhiyun int ret = 0;
2065*4882a593Smuzhiyun
2066*4882a593Smuzhiyun if (!type->name) {
2067*4882a593Smuzhiyun pr_info("Tracer must have a name\n");
2068*4882a593Smuzhiyun return -1;
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun
2071*4882a593Smuzhiyun if (strlen(type->name) >= MAX_TRACER_SIZE) {
2072*4882a593Smuzhiyun pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2073*4882a593Smuzhiyun return -1;
2074*4882a593Smuzhiyun }
2075*4882a593Smuzhiyun
2076*4882a593Smuzhiyun if (security_locked_down(LOCKDOWN_TRACEFS)) {
2077*4882a593Smuzhiyun pr_warn("Can not register tracer %s due to lockdown\n",
2078*4882a593Smuzhiyun type->name);
2079*4882a593Smuzhiyun return -EPERM;
2080*4882a593Smuzhiyun }
2081*4882a593Smuzhiyun
2082*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
2083*4882a593Smuzhiyun
2084*4882a593Smuzhiyun tracing_selftest_running = true;
2085*4882a593Smuzhiyun
2086*4882a593Smuzhiyun for (t = trace_types; t; t = t->next) {
2087*4882a593Smuzhiyun if (strcmp(type->name, t->name) == 0) {
2088*4882a593Smuzhiyun /* already found */
2089*4882a593Smuzhiyun pr_info("Tracer %s already registered\n",
2090*4882a593Smuzhiyun type->name);
2091*4882a593Smuzhiyun ret = -1;
2092*4882a593Smuzhiyun goto out;
2093*4882a593Smuzhiyun }
2094*4882a593Smuzhiyun }
2095*4882a593Smuzhiyun
2096*4882a593Smuzhiyun if (!type->set_flag)
2097*4882a593Smuzhiyun type->set_flag = &dummy_set_flag;
2098*4882a593Smuzhiyun if (!type->flags) {
2099*4882a593Smuzhiyun 		/* allocate a dummy tracer_flags */
2100*4882a593Smuzhiyun type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2101*4882a593Smuzhiyun if (!type->flags) {
2102*4882a593Smuzhiyun ret = -ENOMEM;
2103*4882a593Smuzhiyun goto out;
2104*4882a593Smuzhiyun }
2105*4882a593Smuzhiyun type->flags->val = 0;
2106*4882a593Smuzhiyun type->flags->opts = dummy_tracer_opt;
2107*4882a593Smuzhiyun } else
2108*4882a593Smuzhiyun if (!type->flags->opts)
2109*4882a593Smuzhiyun type->flags->opts = dummy_tracer_opt;
2110*4882a593Smuzhiyun
2111*4882a593Smuzhiyun /* store the tracer for __set_tracer_option */
2112*4882a593Smuzhiyun type->flags->trace = type;
2113*4882a593Smuzhiyun
2114*4882a593Smuzhiyun ret = run_tracer_selftest(type);
2115*4882a593Smuzhiyun if (ret < 0)
2116*4882a593Smuzhiyun goto out;
2117*4882a593Smuzhiyun
2118*4882a593Smuzhiyun type->next = trace_types;
2119*4882a593Smuzhiyun trace_types = type;
2120*4882a593Smuzhiyun add_tracer_options(&global_trace, type);
2121*4882a593Smuzhiyun
2122*4882a593Smuzhiyun out:
2123*4882a593Smuzhiyun tracing_selftest_running = false;
2124*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
2125*4882a593Smuzhiyun
2126*4882a593Smuzhiyun if (ret || !default_bootup_tracer)
2127*4882a593Smuzhiyun goto out_unlock;
2128*4882a593Smuzhiyun
2129*4882a593Smuzhiyun if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2130*4882a593Smuzhiyun goto out_unlock;
2131*4882a593Smuzhiyun
2132*4882a593Smuzhiyun printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2133*4882a593Smuzhiyun /* Do we want this tracer to start on bootup? */
2134*4882a593Smuzhiyun tracing_set_tracer(&global_trace, type->name);
2135*4882a593Smuzhiyun default_bootup_tracer = NULL;
2136*4882a593Smuzhiyun
2137*4882a593Smuzhiyun apply_trace_boot_options();
2138*4882a593Smuzhiyun
2139*4882a593Smuzhiyun 	/* disable other selftests, since this will break them. */
2140*4882a593Smuzhiyun disable_tracing_selftest("running a tracer");
2141*4882a593Smuzhiyun
2142*4882a593Smuzhiyun out_unlock:
2143*4882a593Smuzhiyun return ret;
2144*4882a593Smuzhiyun }
2145*4882a593Smuzhiyun
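/*
 * Minimal sketch of what a built-in tracer registration looks like (names
 * hypothetical). register_tracer() is __init and not exported, so this only
 * works for code linked into the kernel, typically from its own initcall:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int my_tracer_setup(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_setup);
 */
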
2146*4882a593Smuzhiyun static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2147*4882a593Smuzhiyun {
2148*4882a593Smuzhiyun struct trace_buffer *buffer = buf->buffer;
2149*4882a593Smuzhiyun
2150*4882a593Smuzhiyun if (!buffer)
2151*4882a593Smuzhiyun return;
2152*4882a593Smuzhiyun
2153*4882a593Smuzhiyun ring_buffer_record_disable(buffer);
2154*4882a593Smuzhiyun
2155*4882a593Smuzhiyun /* Make sure all commits have finished */
2156*4882a593Smuzhiyun synchronize_rcu();
2157*4882a593Smuzhiyun ring_buffer_reset_cpu(buffer, cpu);
2158*4882a593Smuzhiyun
2159*4882a593Smuzhiyun ring_buffer_record_enable(buffer);
2160*4882a593Smuzhiyun }
2161*4882a593Smuzhiyun
2162*4882a593Smuzhiyun void tracing_reset_online_cpus(struct array_buffer *buf)
2163*4882a593Smuzhiyun {
2164*4882a593Smuzhiyun struct trace_buffer *buffer = buf->buffer;
2165*4882a593Smuzhiyun
2166*4882a593Smuzhiyun if (!buffer)
2167*4882a593Smuzhiyun return;
2168*4882a593Smuzhiyun
2169*4882a593Smuzhiyun ring_buffer_record_disable(buffer);
2170*4882a593Smuzhiyun
2171*4882a593Smuzhiyun /* Make sure all commits have finished */
2172*4882a593Smuzhiyun synchronize_rcu();
2173*4882a593Smuzhiyun
2174*4882a593Smuzhiyun buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2175*4882a593Smuzhiyun
2176*4882a593Smuzhiyun ring_buffer_reset_online_cpus(buffer);
2177*4882a593Smuzhiyun
2178*4882a593Smuzhiyun ring_buffer_record_enable(buffer);
2179*4882a593Smuzhiyun }
2180*4882a593Smuzhiyun
2181*4882a593Smuzhiyun /* Must have trace_types_lock held */
2182*4882a593Smuzhiyun void tracing_reset_all_online_cpus(void)
2183*4882a593Smuzhiyun {
2184*4882a593Smuzhiyun struct trace_array *tr;
2185*4882a593Smuzhiyun
2186*4882a593Smuzhiyun list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2187*4882a593Smuzhiyun if (!tr->clear_trace)
2188*4882a593Smuzhiyun continue;
2189*4882a593Smuzhiyun tr->clear_trace = false;
2190*4882a593Smuzhiyun tracing_reset_online_cpus(&tr->array_buffer);
2191*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
2192*4882a593Smuzhiyun tracing_reset_online_cpus(&tr->max_buffer);
2193*4882a593Smuzhiyun #endif
2194*4882a593Smuzhiyun }
2195*4882a593Smuzhiyun }
2196*4882a593Smuzhiyun
2197*4882a593Smuzhiyun /*
2198*4882a593Smuzhiyun * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2199*4882a593Smuzhiyun * is the tgid last observed corresponding to pid=i.
2200*4882a593Smuzhiyun */
2201*4882a593Smuzhiyun static int *tgid_map;
2202*4882a593Smuzhiyun
2203*4882a593Smuzhiyun /* The maximum valid index into tgid_map. */
2204*4882a593Smuzhiyun static size_t tgid_map_max;
2205*4882a593Smuzhiyun
2206*4882a593Smuzhiyun #define SAVED_CMDLINES_DEFAULT 128
2207*4882a593Smuzhiyun #define NO_CMDLINE_MAP UINT_MAX
2208*4882a593Smuzhiyun /*
2209*4882a593Smuzhiyun * Preemption must be disabled before acquiring trace_cmdline_lock.
2210*4882a593Smuzhiyun * The various trace_arrays' max_lock must be acquired in a context
2211*4882a593Smuzhiyun * where interrupt is disabled.
2212*4882a593Smuzhiyun */
2213*4882a593Smuzhiyun static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2214*4882a593Smuzhiyun struct saved_cmdlines_buffer {
2215*4882a593Smuzhiyun unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2216*4882a593Smuzhiyun unsigned *map_cmdline_to_pid;
2217*4882a593Smuzhiyun unsigned cmdline_num;
2218*4882a593Smuzhiyun int cmdline_idx;
2219*4882a593Smuzhiyun char *saved_cmdlines;
2220*4882a593Smuzhiyun };
2221*4882a593Smuzhiyun static struct saved_cmdlines_buffer *savedcmd;
2222*4882a593Smuzhiyun
2223*4882a593Smuzhiyun static inline char *get_saved_cmdlines(int idx)
2224*4882a593Smuzhiyun {
2225*4882a593Smuzhiyun return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2226*4882a593Smuzhiyun }
2227*4882a593Smuzhiyun
2228*4882a593Smuzhiyun static inline void set_cmdline(int idx, const char *cmdline)
2229*4882a593Smuzhiyun {
2230*4882a593Smuzhiyun strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2231*4882a593Smuzhiyun }
2232*4882a593Smuzhiyun
2233*4882a593Smuzhiyun static int allocate_cmdlines_buffer(unsigned int val,
2234*4882a593Smuzhiyun struct saved_cmdlines_buffer *s)
2235*4882a593Smuzhiyun {
2236*4882a593Smuzhiyun s->map_cmdline_to_pid = kmalloc_array(val,
2237*4882a593Smuzhiyun sizeof(*s->map_cmdline_to_pid),
2238*4882a593Smuzhiyun GFP_KERNEL);
2239*4882a593Smuzhiyun if (!s->map_cmdline_to_pid)
2240*4882a593Smuzhiyun return -ENOMEM;
2241*4882a593Smuzhiyun
2242*4882a593Smuzhiyun s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2243*4882a593Smuzhiyun if (!s->saved_cmdlines) {
2244*4882a593Smuzhiyun kfree(s->map_cmdline_to_pid);
2245*4882a593Smuzhiyun return -ENOMEM;
2246*4882a593Smuzhiyun }
2247*4882a593Smuzhiyun
2248*4882a593Smuzhiyun s->cmdline_idx = 0;
2249*4882a593Smuzhiyun s->cmdline_num = val;
2250*4882a593Smuzhiyun memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2251*4882a593Smuzhiyun sizeof(s->map_pid_to_cmdline));
2252*4882a593Smuzhiyun memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2253*4882a593Smuzhiyun val * sizeof(*s->map_cmdline_to_pid));
2254*4882a593Smuzhiyun
2255*4882a593Smuzhiyun return 0;
2256*4882a593Smuzhiyun }
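
/*
 * With the default of 128 entries this is a modest allocation: 128 *
 * TASK_COMM_LEN (16) = 2 KiB for the comm strings plus 128 *
 * sizeof(unsigned) = 512 bytes for the reverse map. The dominant cost is
 * the fixed map_pid_to_cmdline[PID_MAX_DEFAULT + 1] array embedded in the
 * struct, roughly 128 KiB, allocated along with the struct in
 * trace_create_savedcmd() below.
 */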
2257*4882a593Smuzhiyun
2258*4882a593Smuzhiyun static int trace_create_savedcmd(void)
2259*4882a593Smuzhiyun {
2260*4882a593Smuzhiyun int ret;
2261*4882a593Smuzhiyun
2262*4882a593Smuzhiyun savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2263*4882a593Smuzhiyun if (!savedcmd)
2264*4882a593Smuzhiyun return -ENOMEM;
2265*4882a593Smuzhiyun
2266*4882a593Smuzhiyun ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2267*4882a593Smuzhiyun if (ret < 0) {
2268*4882a593Smuzhiyun kfree(savedcmd);
2269*4882a593Smuzhiyun savedcmd = NULL;
2270*4882a593Smuzhiyun return -ENOMEM;
2271*4882a593Smuzhiyun }
2272*4882a593Smuzhiyun
2273*4882a593Smuzhiyun return 0;
2274*4882a593Smuzhiyun }
2275*4882a593Smuzhiyun
2276*4882a593Smuzhiyun int is_tracing_stopped(void)
2277*4882a593Smuzhiyun {
2278*4882a593Smuzhiyun return global_trace.stop_count;
2279*4882a593Smuzhiyun }
2280*4882a593Smuzhiyun
2281*4882a593Smuzhiyun /**
2282*4882a593Smuzhiyun * tracing_start - quick start of the tracer
2283*4882a593Smuzhiyun *
2284*4882a593Smuzhiyun * If tracing is enabled but was stopped by tracing_stop,
2285*4882a593Smuzhiyun * this will start the tracer back up.
2286*4882a593Smuzhiyun */
2287*4882a593Smuzhiyun void tracing_start(void)
2288*4882a593Smuzhiyun {
2289*4882a593Smuzhiyun struct trace_buffer *buffer;
2290*4882a593Smuzhiyun unsigned long flags;
2291*4882a593Smuzhiyun
2292*4882a593Smuzhiyun if (tracing_disabled)
2293*4882a593Smuzhiyun return;
2294*4882a593Smuzhiyun
2295*4882a593Smuzhiyun raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2296*4882a593Smuzhiyun if (--global_trace.stop_count) {
2297*4882a593Smuzhiyun if (global_trace.stop_count < 0) {
2298*4882a593Smuzhiyun /* Someone screwed up their debugging */
2299*4882a593Smuzhiyun WARN_ON_ONCE(1);
2300*4882a593Smuzhiyun global_trace.stop_count = 0;
2301*4882a593Smuzhiyun }
2302*4882a593Smuzhiyun goto out;
2303*4882a593Smuzhiyun }
2304*4882a593Smuzhiyun
2305*4882a593Smuzhiyun /* Prevent the buffers from switching */
2306*4882a593Smuzhiyun arch_spin_lock(&global_trace.max_lock);
2307*4882a593Smuzhiyun
2308*4882a593Smuzhiyun buffer = global_trace.array_buffer.buffer;
2309*4882a593Smuzhiyun if (buffer)
2310*4882a593Smuzhiyun ring_buffer_record_enable(buffer);
2311*4882a593Smuzhiyun
2312*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
2313*4882a593Smuzhiyun buffer = global_trace.max_buffer.buffer;
2314*4882a593Smuzhiyun if (buffer)
2315*4882a593Smuzhiyun ring_buffer_record_enable(buffer);
2316*4882a593Smuzhiyun #endif
2317*4882a593Smuzhiyun
2318*4882a593Smuzhiyun arch_spin_unlock(&global_trace.max_lock);
2319*4882a593Smuzhiyun
2320*4882a593Smuzhiyun out:
2321*4882a593Smuzhiyun raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2322*4882a593Smuzhiyun }
2323*4882a593Smuzhiyun
2324*4882a593Smuzhiyun static void tracing_start_tr(struct trace_array *tr)
2325*4882a593Smuzhiyun {
2326*4882a593Smuzhiyun struct trace_buffer *buffer;
2327*4882a593Smuzhiyun unsigned long flags;
2328*4882a593Smuzhiyun
2329*4882a593Smuzhiyun if (tracing_disabled)
2330*4882a593Smuzhiyun return;
2331*4882a593Smuzhiyun
2332*4882a593Smuzhiyun /* If global, we need to also start the max tracer */
2333*4882a593Smuzhiyun if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2334*4882a593Smuzhiyun return tracing_start();
2335*4882a593Smuzhiyun
2336*4882a593Smuzhiyun raw_spin_lock_irqsave(&tr->start_lock, flags);
2337*4882a593Smuzhiyun
2338*4882a593Smuzhiyun if (--tr->stop_count) {
2339*4882a593Smuzhiyun if (tr->stop_count < 0) {
2340*4882a593Smuzhiyun /* Someone screwed up their debugging */
2341*4882a593Smuzhiyun WARN_ON_ONCE(1);
2342*4882a593Smuzhiyun tr->stop_count = 0;
2343*4882a593Smuzhiyun }
2344*4882a593Smuzhiyun goto out;
2345*4882a593Smuzhiyun }
2346*4882a593Smuzhiyun
2347*4882a593Smuzhiyun buffer = tr->array_buffer.buffer;
2348*4882a593Smuzhiyun if (buffer)
2349*4882a593Smuzhiyun ring_buffer_record_enable(buffer);
2350*4882a593Smuzhiyun
2351*4882a593Smuzhiyun out:
2352*4882a593Smuzhiyun raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2353*4882a593Smuzhiyun }
2354*4882a593Smuzhiyun
2355*4882a593Smuzhiyun /**
2356*4882a593Smuzhiyun * tracing_stop - quick stop of the tracer
2357*4882a593Smuzhiyun *
2358*4882a593Smuzhiyun * Lightweight way to stop tracing. Use in conjunction with
2359*4882a593Smuzhiyun * tracing_start.
2360*4882a593Smuzhiyun */
2361*4882a593Smuzhiyun void tracing_stop(void)
2362*4882a593Smuzhiyun {
2363*4882a593Smuzhiyun struct trace_buffer *buffer;
2364*4882a593Smuzhiyun unsigned long flags;
2365*4882a593Smuzhiyun
2366*4882a593Smuzhiyun raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2367*4882a593Smuzhiyun if (global_trace.stop_count++)
2368*4882a593Smuzhiyun goto out;
2369*4882a593Smuzhiyun
2370*4882a593Smuzhiyun /* Prevent the buffers from switching */
2371*4882a593Smuzhiyun arch_spin_lock(&global_trace.max_lock);
2372*4882a593Smuzhiyun
2373*4882a593Smuzhiyun buffer = global_trace.array_buffer.buffer;
2374*4882a593Smuzhiyun if (buffer)
2375*4882a593Smuzhiyun ring_buffer_record_disable(buffer);
2376*4882a593Smuzhiyun
2377*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
2378*4882a593Smuzhiyun buffer = global_trace.max_buffer.buffer;
2379*4882a593Smuzhiyun if (buffer)
2380*4882a593Smuzhiyun ring_buffer_record_disable(buffer);
2381*4882a593Smuzhiyun #endif
2382*4882a593Smuzhiyun
2383*4882a593Smuzhiyun arch_spin_unlock(&global_trace.max_lock);
2384*4882a593Smuzhiyun
2385*4882a593Smuzhiyun out:
2386*4882a593Smuzhiyun raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2387*4882a593Smuzhiyun }
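/*
 * Illustrative sketch (not part of this file): tracing_stop() and
 * tracing_start() nest via stop_count, so a debugging helper can
 * bracket a critical region and tracing only resumes when the
 * outermost caller restarts it:
 *
 *	tracing_stop();
 *	do_something_interesting();	// hypothetical helper
 *	tracing_start();
 */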
2388*4882a593Smuzhiyun
2389*4882a593Smuzhiyun static void tracing_stop_tr(struct trace_array *tr)
2390*4882a593Smuzhiyun {
2391*4882a593Smuzhiyun struct trace_buffer *buffer;
2392*4882a593Smuzhiyun unsigned long flags;
2393*4882a593Smuzhiyun
2394*4882a593Smuzhiyun /* If global, we need to also stop the max tracer */
2395*4882a593Smuzhiyun if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2396*4882a593Smuzhiyun return tracing_stop();
2397*4882a593Smuzhiyun
2398*4882a593Smuzhiyun raw_spin_lock_irqsave(&tr->start_lock, flags);
2399*4882a593Smuzhiyun if (tr->stop_count++)
2400*4882a593Smuzhiyun goto out;
2401*4882a593Smuzhiyun
2402*4882a593Smuzhiyun buffer = tr->array_buffer.buffer;
2403*4882a593Smuzhiyun if (buffer)
2404*4882a593Smuzhiyun ring_buffer_record_disable(buffer);
2405*4882a593Smuzhiyun
2406*4882a593Smuzhiyun out:
2407*4882a593Smuzhiyun raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2408*4882a593Smuzhiyun }
2409*4882a593Smuzhiyun
2410*4882a593Smuzhiyun static int trace_save_cmdline(struct task_struct *tsk)
2411*4882a593Smuzhiyun {
2412*4882a593Smuzhiyun unsigned tpid, idx;
2413*4882a593Smuzhiyun
2414*4882a593Smuzhiyun /* treat recording of idle task as a success */
2415*4882a593Smuzhiyun if (!tsk->pid)
2416*4882a593Smuzhiyun return 1;
2417*4882a593Smuzhiyun
2418*4882a593Smuzhiyun tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2419*4882a593Smuzhiyun
2420*4882a593Smuzhiyun /*
2421*4882a593Smuzhiyun * It's not the end of the world if we don't get
2422*4882a593Smuzhiyun * the lock, but we also don't want to spin
2423*4882a593Smuzhiyun * nor do we want to disable interrupts,
2424*4882a593Smuzhiyun * so if we miss here, then better luck next time.
2425*4882a593Smuzhiyun *
2426*4882a593Smuzhiyun * This is called from within the scheduler and from wakeups, so
2427*4882a593Smuzhiyun * interrupts had better be disabled and the run queue lock held.
2428*4882a593Smuzhiyun */
2429*4882a593Smuzhiyun lockdep_assert_preemption_disabled();
2430*4882a593Smuzhiyun if (!arch_spin_trylock(&trace_cmdline_lock))
2431*4882a593Smuzhiyun return 0;
2432*4882a593Smuzhiyun
2433*4882a593Smuzhiyun idx = savedcmd->map_pid_to_cmdline[tpid];
2434*4882a593Smuzhiyun if (idx == NO_CMDLINE_MAP) {
2435*4882a593Smuzhiyun idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2436*4882a593Smuzhiyun
2437*4882a593Smuzhiyun savedcmd->map_pid_to_cmdline[tpid] = idx;
2438*4882a593Smuzhiyun savedcmd->cmdline_idx = idx;
2439*4882a593Smuzhiyun }
2440*4882a593Smuzhiyun
2441*4882a593Smuzhiyun savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2442*4882a593Smuzhiyun set_cmdline(idx, tsk->comm);
2443*4882a593Smuzhiyun
2444*4882a593Smuzhiyun arch_spin_unlock(&trace_cmdline_lock);
2445*4882a593Smuzhiyun
2446*4882a593Smuzhiyun return 1;
2447*4882a593Smuzhiyun }
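/*
 * Worked example (illustrative, assuming PID_MAX_DEFAULT == 0x8000):
 * two tasks whose pids differ by a multiple of PID_MAX_DEFAULT hash to
 * the same tpid slot, e.g. pid 0x1234 and pid 0x9234 both map to
 * tpid 0x1234, so the later writer simply overwrites the earlier comm.
 * That is an accepted trade-off for a lockless, fixed-size map.
 */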
2448*4882a593Smuzhiyun
2449*4882a593Smuzhiyun static void __trace_find_cmdline(int pid, char comm[])
2450*4882a593Smuzhiyun {
2451*4882a593Smuzhiyun unsigned map;
2452*4882a593Smuzhiyun int tpid;
2453*4882a593Smuzhiyun
2454*4882a593Smuzhiyun if (!pid) {
2455*4882a593Smuzhiyun strcpy(comm, "<idle>");
2456*4882a593Smuzhiyun return;
2457*4882a593Smuzhiyun }
2458*4882a593Smuzhiyun
2459*4882a593Smuzhiyun if (WARN_ON_ONCE(pid < 0)) {
2460*4882a593Smuzhiyun strcpy(comm, "<XXX>");
2461*4882a593Smuzhiyun return;
2462*4882a593Smuzhiyun }
2463*4882a593Smuzhiyun
2464*4882a593Smuzhiyun tpid = pid & (PID_MAX_DEFAULT - 1);
2465*4882a593Smuzhiyun map = savedcmd->map_pid_to_cmdline[tpid];
2466*4882a593Smuzhiyun if (map != NO_CMDLINE_MAP) {
2467*4882a593Smuzhiyun tpid = savedcmd->map_cmdline_to_pid[map];
2468*4882a593Smuzhiyun if (tpid == pid) {
2469*4882a593Smuzhiyun strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2470*4882a593Smuzhiyun return;
2471*4882a593Smuzhiyun }
2472*4882a593Smuzhiyun }
2473*4882a593Smuzhiyun strcpy(comm, "<...>");
2474*4882a593Smuzhiyun }
2475*4882a593Smuzhiyun
2476*4882a593Smuzhiyun void trace_find_cmdline(int pid, char comm[])
2477*4882a593Smuzhiyun {
2478*4882a593Smuzhiyun preempt_disable();
2479*4882a593Smuzhiyun arch_spin_lock(&trace_cmdline_lock);
2480*4882a593Smuzhiyun
2481*4882a593Smuzhiyun __trace_find_cmdline(pid, comm);
2482*4882a593Smuzhiyun
2483*4882a593Smuzhiyun arch_spin_unlock(&trace_cmdline_lock);
2484*4882a593Smuzhiyun preempt_enable();
2485*4882a593Smuzhiyun }
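/*
 * Illustrative usage (sketch, not part of this file): output code
 * typically resolves the pid recorded in an entry back to a comm:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-7d", comm, entry->pid);
 */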
2486*4882a593Smuzhiyun
2487*4882a593Smuzhiyun static int *trace_find_tgid_ptr(int pid)
2488*4882a593Smuzhiyun {
2489*4882a593Smuzhiyun /*
2490*4882a593Smuzhiyun * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2491*4882a593Smuzhiyun * if we observe a non-NULL tgid_map then we also observe the correct
2492*4882a593Smuzhiyun * tgid_map_max.
2493*4882a593Smuzhiyun */
2494*4882a593Smuzhiyun int *map = smp_load_acquire(&tgid_map);
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun if (unlikely(!map || pid > tgid_map_max))
2497*4882a593Smuzhiyun return NULL;
2498*4882a593Smuzhiyun
2499*4882a593Smuzhiyun return &map[pid];
2500*4882a593Smuzhiyun }
2501*4882a593Smuzhiyun
2502*4882a593Smuzhiyun int trace_find_tgid(int pid)
2503*4882a593Smuzhiyun {
2504*4882a593Smuzhiyun int *ptr = trace_find_tgid_ptr(pid);
2505*4882a593Smuzhiyun
2506*4882a593Smuzhiyun return ptr ? *ptr : 0;
2507*4882a593Smuzhiyun }
2508*4882a593Smuzhiyun
2509*4882a593Smuzhiyun static int trace_save_tgid(struct task_struct *tsk)
2510*4882a593Smuzhiyun {
2511*4882a593Smuzhiyun int *ptr;
2512*4882a593Smuzhiyun
2513*4882a593Smuzhiyun /* treat recording of idle task as a success */
2514*4882a593Smuzhiyun if (!tsk->pid)
2515*4882a593Smuzhiyun return 1;
2516*4882a593Smuzhiyun
2517*4882a593Smuzhiyun ptr = trace_find_tgid_ptr(tsk->pid);
2518*4882a593Smuzhiyun if (!ptr)
2519*4882a593Smuzhiyun return 0;
2520*4882a593Smuzhiyun
2521*4882a593Smuzhiyun *ptr = tsk->tgid;
2522*4882a593Smuzhiyun return 1;
2523*4882a593Smuzhiyun }
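/*
 * Sketch of the lookup side (illustrative): trace_find_tgid() returns 0
 * both for pids that were never recorded and when the record-tgid trace
 * option has not been enabled (tgid_map not allocated), so printers
 * usually fall back to a placeholder:
 *
 *	int tgid = trace_find_tgid(entry->pid);
 *
 *	if (tgid)
 *		trace_seq_printf(s, "%7d", tgid);
 *	else
 *		trace_seq_puts(s, "  -----");
 */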
2524*4882a593Smuzhiyun
2525*4882a593Smuzhiyun static bool tracing_record_taskinfo_skip(int flags)
2526*4882a593Smuzhiyun {
2527*4882a593Smuzhiyun if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2528*4882a593Smuzhiyun return true;
2529*4882a593Smuzhiyun if (!__this_cpu_read(trace_taskinfo_save))
2530*4882a593Smuzhiyun return true;
2531*4882a593Smuzhiyun return false;
2532*4882a593Smuzhiyun }
2533*4882a593Smuzhiyun
2534*4882a593Smuzhiyun /**
2535*4882a593Smuzhiyun * tracing_record_taskinfo - record the task info of a task
2536*4882a593Smuzhiyun *
2537*4882a593Smuzhiyun * @task: task to record
2538*4882a593Smuzhiyun * @flags: TRACE_RECORD_CMDLINE for recording comm
2539*4882a593Smuzhiyun * TRACE_RECORD_TGID for recording tgid
2540*4882a593Smuzhiyun */
2541*4882a593Smuzhiyun void tracing_record_taskinfo(struct task_struct *task, int flags)
2542*4882a593Smuzhiyun {
2543*4882a593Smuzhiyun bool done;
2544*4882a593Smuzhiyun
2545*4882a593Smuzhiyun if (tracing_record_taskinfo_skip(flags))
2546*4882a593Smuzhiyun return;
2547*4882a593Smuzhiyun
2548*4882a593Smuzhiyun /*
2549*4882a593Smuzhiyun * Record as much task information as possible. If some fail, continue
2550*4882a593Smuzhiyun * to try to record the others.
2551*4882a593Smuzhiyun */
2552*4882a593Smuzhiyun done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2553*4882a593Smuzhiyun done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2554*4882a593Smuzhiyun
2555*4882a593Smuzhiyun /* If recording any information failed, retry again soon. */
2556*4882a593Smuzhiyun if (!done)
2557*4882a593Smuzhiyun return;
2558*4882a593Smuzhiyun
2559*4882a593Smuzhiyun __this_cpu_write(trace_taskinfo_save, false);
2560*4882a593Smuzhiyun }
2561*4882a593Smuzhiyun
2562*4882a593Smuzhiyun /**
2563*4882a593Smuzhiyun * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2564*4882a593Smuzhiyun *
2565*4882a593Smuzhiyun * @prev: previous task during sched_switch
2566*4882a593Smuzhiyun * @next: next task during sched_switch
2567*4882a593Smuzhiyun * @flags: TRACE_RECORD_CMDLINE for recording comm
2568*4882a593Smuzhiyun * TRACE_RECORD_TGID for recording tgid
2569*4882a593Smuzhiyun */
2570*4882a593Smuzhiyun void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2571*4882a593Smuzhiyun struct task_struct *next, int flags)
2572*4882a593Smuzhiyun {
2573*4882a593Smuzhiyun bool done;
2574*4882a593Smuzhiyun
2575*4882a593Smuzhiyun if (tracing_record_taskinfo_skip(flags))
2576*4882a593Smuzhiyun return;
2577*4882a593Smuzhiyun
2578*4882a593Smuzhiyun /*
2579*4882a593Smuzhiyun * Record as much task information as possible. If some fail, continue
2580*4882a593Smuzhiyun * to try to record the others.
2581*4882a593Smuzhiyun */
2582*4882a593Smuzhiyun done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2583*4882a593Smuzhiyun done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2584*4882a593Smuzhiyun done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2585*4882a593Smuzhiyun done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2586*4882a593Smuzhiyun
2587*4882a593Smuzhiyun /* If recording any information failed, retry again soon. */
2588*4882a593Smuzhiyun if (!done)
2589*4882a593Smuzhiyun return;
2590*4882a593Smuzhiyun
2591*4882a593Smuzhiyun __this_cpu_write(trace_taskinfo_save, false);
2592*4882a593Smuzhiyun }
2593*4882a593Smuzhiyun
2594*4882a593Smuzhiyun /* Helpers to record specific task information */
2595*4882a593Smuzhiyun void tracing_record_cmdline(struct task_struct *task)
2596*4882a593Smuzhiyun {
2597*4882a593Smuzhiyun tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2598*4882a593Smuzhiyun }
2599*4882a593Smuzhiyun
2600*4882a593Smuzhiyun void tracing_record_tgid(struct task_struct *task)
2601*4882a593Smuzhiyun {
2602*4882a593Smuzhiyun tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2603*4882a593Smuzhiyun }
2604*4882a593Smuzhiyun
2605*4882a593Smuzhiyun /*
2606*4882a593Smuzhiyun * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2607*4882a593Smuzhiyun * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2608*4882a593Smuzhiyun * simplifies those functions and keeps them in sync.
2609*4882a593Smuzhiyun */
2610*4882a593Smuzhiyun enum print_line_t trace_handle_return(struct trace_seq *s)
2611*4882a593Smuzhiyun {
2612*4882a593Smuzhiyun return trace_seq_has_overflowed(s) ?
2613*4882a593Smuzhiyun TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2614*4882a593Smuzhiyun }
2615*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_handle_return);
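/*
 * Illustrative usage (sketch): an event's print handler builds its line
 * in the trace_seq and lets trace_handle_return() report whether the
 * sequence overflowed. trace_foo_print() below is a hypothetical
 * handler, used only for illustration:
 *
 *	static enum print_line_t trace_foo_print(struct trace_iterator *iter,
 *						 int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "foo: %d\n", 42);
 *		return trace_handle_return(&iter->seq);
 *	}
 */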
2616*4882a593Smuzhiyun
2617*4882a593Smuzhiyun void
2618*4882a593Smuzhiyun tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2619*4882a593Smuzhiyun unsigned long flags, int pc)
2620*4882a593Smuzhiyun {
2621*4882a593Smuzhiyun struct task_struct *tsk = current;
2622*4882a593Smuzhiyun
2623*4882a593Smuzhiyun entry->preempt_count = pc & 0xff;
2624*4882a593Smuzhiyun entry->pid = (tsk) ? tsk->pid : 0;
2625*4882a593Smuzhiyun entry->type = type;
2626*4882a593Smuzhiyun entry->flags =
2627*4882a593Smuzhiyun #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2628*4882a593Smuzhiyun (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2629*4882a593Smuzhiyun #else
2630*4882a593Smuzhiyun TRACE_FLAG_IRQS_NOSUPPORT |
2631*4882a593Smuzhiyun #endif
2632*4882a593Smuzhiyun ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
2633*4882a593Smuzhiyun ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2634*4882a593Smuzhiyun ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2635*4882a593Smuzhiyun (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2636*4882a593Smuzhiyun (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2637*4882a593Smuzhiyun }
2638*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
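/*
 * Typical call pattern (illustrative sketch, not taken from a specific
 * caller): code that builds an entry by hand captures the current irq
 * flags and preempt count and hands them to
 * tracing_generic_entry_update() to stamp the entry header:
 *
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *	tracing_generic_entry_update(&entry->ent, TRACE_FN, irq_flags, pc);
 */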
2639*4882a593Smuzhiyun
2640*4882a593Smuzhiyun struct ring_buffer_event *
2641*4882a593Smuzhiyun trace_buffer_lock_reserve(struct trace_buffer *buffer,
2642*4882a593Smuzhiyun int type,
2643*4882a593Smuzhiyun unsigned long len,
2644*4882a593Smuzhiyun unsigned long flags, int pc)
2645*4882a593Smuzhiyun {
2646*4882a593Smuzhiyun return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2647*4882a593Smuzhiyun }
2648*4882a593Smuzhiyun
2649*4882a593Smuzhiyun DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2650*4882a593Smuzhiyun DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2651*4882a593Smuzhiyun static int trace_buffered_event_ref;
2652*4882a593Smuzhiyun
2653*4882a593Smuzhiyun /**
2654*4882a593Smuzhiyun * trace_buffered_event_enable - enable buffering events
2655*4882a593Smuzhiyun *
2656*4882a593Smuzhiyun * When events are being filtered, it is quicker to write the event
2657*4882a593Smuzhiyun * data into a temporary buffer when there is a good chance that the
2658*4882a593Smuzhiyun * event will not be committed. Discarding an event from the ring
2659*4882a593Smuzhiyun * buffer is not as fast as committing one, and is much slower than
2660*4882a593Smuzhiyun * copying a buffered event into a commit.
2661*4882a593Smuzhiyun *
2662*4882a593Smuzhiyun * When an event is to be filtered, allocate per-CPU buffers to
2663*4882a593Smuzhiyun * write the event data into; if the event is filtered and discarded
2664*4882a593Smuzhiyun * it is simply dropped, otherwise the entire data is committed
2665*4882a593Smuzhiyun * in one shot.
2666*4882a593Smuzhiyun */
2667*4882a593Smuzhiyun void trace_buffered_event_enable(void)
2668*4882a593Smuzhiyun {
2669*4882a593Smuzhiyun struct ring_buffer_event *event;
2670*4882a593Smuzhiyun struct page *page;
2671*4882a593Smuzhiyun int cpu;
2672*4882a593Smuzhiyun
2673*4882a593Smuzhiyun WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2674*4882a593Smuzhiyun
2675*4882a593Smuzhiyun if (trace_buffered_event_ref++)
2676*4882a593Smuzhiyun return;
2677*4882a593Smuzhiyun
2678*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
2679*4882a593Smuzhiyun page = alloc_pages_node(cpu_to_node(cpu),
2680*4882a593Smuzhiyun GFP_KERNEL | __GFP_NORETRY, 0);
2681*4882a593Smuzhiyun if (!page)
2682*4882a593Smuzhiyun goto failed;
2683*4882a593Smuzhiyun
2684*4882a593Smuzhiyun event = page_address(page);
2685*4882a593Smuzhiyun memset(event, 0, sizeof(*event));
2686*4882a593Smuzhiyun
2687*4882a593Smuzhiyun per_cpu(trace_buffered_event, cpu) = event;
2688*4882a593Smuzhiyun
2689*4882a593Smuzhiyun preempt_disable();
2690*4882a593Smuzhiyun if (cpu == smp_processor_id() &&
2691*4882a593Smuzhiyun __this_cpu_read(trace_buffered_event) !=
2692*4882a593Smuzhiyun per_cpu(trace_buffered_event, cpu))
2693*4882a593Smuzhiyun WARN_ON_ONCE(1);
2694*4882a593Smuzhiyun preempt_enable();
2695*4882a593Smuzhiyun }
2696*4882a593Smuzhiyun
2697*4882a593Smuzhiyun return;
2698*4882a593Smuzhiyun failed:
2699*4882a593Smuzhiyun trace_buffered_event_disable();
2700*4882a593Smuzhiyun }
2701*4882a593Smuzhiyun
2702*4882a593Smuzhiyun static void enable_trace_buffered_event(void *data)
2703*4882a593Smuzhiyun {
2704*4882a593Smuzhiyun /* Probably not needed, but do it anyway */
2705*4882a593Smuzhiyun smp_rmb();
2706*4882a593Smuzhiyun this_cpu_dec(trace_buffered_event_cnt);
2707*4882a593Smuzhiyun }
2708*4882a593Smuzhiyun
2709*4882a593Smuzhiyun static void disable_trace_buffered_event(void *data)
2710*4882a593Smuzhiyun {
2711*4882a593Smuzhiyun this_cpu_inc(trace_buffered_event_cnt);
2712*4882a593Smuzhiyun }
2713*4882a593Smuzhiyun
2714*4882a593Smuzhiyun /**
2715*4882a593Smuzhiyun * trace_buffered_event_disable - disable buffering events
2716*4882a593Smuzhiyun *
2717*4882a593Smuzhiyun * When a filter is removed, it is faster to not use the buffered
2718*4882a593Smuzhiyun * events, and to commit directly into the ring buffer. Free up
2719*4882a593Smuzhiyun * the temp buffers when there are no more users. This requires
2720*4882a593Smuzhiyun * special synchronization with current events.
2721*4882a593Smuzhiyun */
2722*4882a593Smuzhiyun void trace_buffered_event_disable(void)
2723*4882a593Smuzhiyun {
2724*4882a593Smuzhiyun int cpu;
2725*4882a593Smuzhiyun
2726*4882a593Smuzhiyun WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2727*4882a593Smuzhiyun
2728*4882a593Smuzhiyun if (WARN_ON_ONCE(!trace_buffered_event_ref))
2729*4882a593Smuzhiyun return;
2730*4882a593Smuzhiyun
2731*4882a593Smuzhiyun if (--trace_buffered_event_ref)
2732*4882a593Smuzhiyun return;
2733*4882a593Smuzhiyun
2734*4882a593Smuzhiyun preempt_disable();
2735*4882a593Smuzhiyun /* For each CPU, set the buffer as used. */
2736*4882a593Smuzhiyun smp_call_function_many(tracing_buffer_mask,
2737*4882a593Smuzhiyun disable_trace_buffered_event, NULL, 1);
2738*4882a593Smuzhiyun preempt_enable();
2739*4882a593Smuzhiyun
2740*4882a593Smuzhiyun /* Wait for all current users to finish */
2741*4882a593Smuzhiyun synchronize_rcu();
2742*4882a593Smuzhiyun
2743*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
2744*4882a593Smuzhiyun free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2745*4882a593Smuzhiyun per_cpu(trace_buffered_event, cpu) = NULL;
2746*4882a593Smuzhiyun }
2747*4882a593Smuzhiyun /*
2748*4882a593Smuzhiyun * Make sure trace_buffered_event is NULL before clearing
2749*4882a593Smuzhiyun * trace_buffered_event_cnt.
2750*4882a593Smuzhiyun */
2751*4882a593Smuzhiyun smp_wmb();
2752*4882a593Smuzhiyun
2753*4882a593Smuzhiyun preempt_disable();
2754*4882a593Smuzhiyun /* Do the work on each cpu */
2755*4882a593Smuzhiyun smp_call_function_many(tracing_buffer_mask,
2756*4882a593Smuzhiyun enable_trace_buffered_event, NULL, 1);
2757*4882a593Smuzhiyun preempt_enable();
2758*4882a593Smuzhiyun }
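/*
 * Illustrative pairing (sketch, not part of this file): filter setup
 * code takes event_mutex and brackets the lifetime of a filter with the
 * buffered-event refcount:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();	// when a filter is added
 *	// ... events run with the per-cpu buffered event available ...
 *	trace_buffered_event_disable();	// when the filter is removed
 *	mutex_unlock(&event_mutex);
 */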
2759*4882a593Smuzhiyun
2760*4882a593Smuzhiyun static struct trace_buffer *temp_buffer;
2761*4882a593Smuzhiyun
2762*4882a593Smuzhiyun struct ring_buffer_event *
2763*4882a593Smuzhiyun trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2764*4882a593Smuzhiyun struct trace_event_file *trace_file,
2765*4882a593Smuzhiyun int type, unsigned long len,
2766*4882a593Smuzhiyun unsigned long flags, int pc)
2767*4882a593Smuzhiyun {
2768*4882a593Smuzhiyun struct ring_buffer_event *entry;
2769*4882a593Smuzhiyun int val;
2770*4882a593Smuzhiyun
2771*4882a593Smuzhiyun *current_rb = trace_file->tr->array_buffer.buffer;
2772*4882a593Smuzhiyun
2773*4882a593Smuzhiyun if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2774*4882a593Smuzhiyun (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2775*4882a593Smuzhiyun (entry = this_cpu_read(trace_buffered_event))) {
2776*4882a593Smuzhiyun /* Try to use the per cpu buffer first */
2777*4882a593Smuzhiyun val = this_cpu_inc_return(trace_buffered_event_cnt);
2778*4882a593Smuzhiyun if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
2779*4882a593Smuzhiyun trace_event_setup(entry, type, flags, pc);
2780*4882a593Smuzhiyun entry->array[0] = len;
2781*4882a593Smuzhiyun return entry;
2782*4882a593Smuzhiyun }
2783*4882a593Smuzhiyun this_cpu_dec(trace_buffered_event_cnt);
2784*4882a593Smuzhiyun }
2785*4882a593Smuzhiyun
2786*4882a593Smuzhiyun entry = __trace_buffer_lock_reserve(*current_rb,
2787*4882a593Smuzhiyun type, len, flags, pc);
2788*4882a593Smuzhiyun /*
2789*4882a593Smuzhiyun * If tracing is off, but we have triggers enabled
2790*4882a593Smuzhiyun * we still need to look at the event data. Use the temp_buffer
2791*4882a593Smuzhiyun * to store the trace event for the trigger to use. It's recursion
2792*4882a593Smuzhiyun * safe and will not be recorded anywhere.
2793*4882a593Smuzhiyun */
2794*4882a593Smuzhiyun if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2795*4882a593Smuzhiyun *current_rb = temp_buffer;
2796*4882a593Smuzhiyun entry = __trace_buffer_lock_reserve(*current_rb,
2797*4882a593Smuzhiyun type, len, flags, pc);
2798*4882a593Smuzhiyun }
2799*4882a593Smuzhiyun return entry;
2800*4882a593Smuzhiyun }
2801*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
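/*
 * Illustrative reserve/fill/commit flow (sketch of how generated event
 * code might use this, not copied from a specific caller): the reserve
 * transparently hands back either the per-cpu buffered event or space
 * in the real ring buffer, and the commit finishes it:
 *
 *	fbuffer.event = trace_event_buffer_lock_reserve(&fbuffer.buffer,
 *				trace_file, event_type, sizeof(*entry),
 *				irq_flags, pc);
 *	if (!fbuffer.event)
 *		return;
 *	entry = ring_buffer_event_data(fbuffer.event);
 *	entry->field = value;		// hypothetical event field
 *	trace_event_buffer_commit(&fbuffer);
 */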
2802*4882a593Smuzhiyun
2803*4882a593Smuzhiyun static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2804*4882a593Smuzhiyun static DEFINE_MUTEX(tracepoint_printk_mutex);
2805*4882a593Smuzhiyun
2806*4882a593Smuzhiyun static void output_printk(struct trace_event_buffer *fbuffer)
2807*4882a593Smuzhiyun {
2808*4882a593Smuzhiyun struct trace_event_call *event_call;
2809*4882a593Smuzhiyun struct trace_event_file *file;
2810*4882a593Smuzhiyun struct trace_event *event;
2811*4882a593Smuzhiyun unsigned long flags;
2812*4882a593Smuzhiyun struct trace_iterator *iter = tracepoint_print_iter;
2813*4882a593Smuzhiyun
2814*4882a593Smuzhiyun /* We should never get here if iter is NULL */
2815*4882a593Smuzhiyun if (WARN_ON_ONCE(!iter))
2816*4882a593Smuzhiyun return;
2817*4882a593Smuzhiyun
2818*4882a593Smuzhiyun event_call = fbuffer->trace_file->event_call;
2819*4882a593Smuzhiyun if (!event_call || !event_call->event.funcs ||
2820*4882a593Smuzhiyun !event_call->event.funcs->trace)
2821*4882a593Smuzhiyun return;
2822*4882a593Smuzhiyun
2823*4882a593Smuzhiyun file = fbuffer->trace_file;
2824*4882a593Smuzhiyun if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2825*4882a593Smuzhiyun (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2826*4882a593Smuzhiyun !filter_match_preds(file->filter, fbuffer->entry)))
2827*4882a593Smuzhiyun return;
2828*4882a593Smuzhiyun
2829*4882a593Smuzhiyun event = &fbuffer->trace_file->event_call->event;
2830*4882a593Smuzhiyun
2831*4882a593Smuzhiyun raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2832*4882a593Smuzhiyun trace_seq_init(&iter->seq);
2833*4882a593Smuzhiyun iter->ent = fbuffer->entry;
2834*4882a593Smuzhiyun event_call->event.funcs->trace(iter, 0, event);
2835*4882a593Smuzhiyun trace_seq_putc(&iter->seq, 0);
2836*4882a593Smuzhiyun printk("%s", iter->seq.buffer);
2837*4882a593Smuzhiyun
2838*4882a593Smuzhiyun raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2839*4882a593Smuzhiyun }
2840*4882a593Smuzhiyun
2841*4882a593Smuzhiyun int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2842*4882a593Smuzhiyun void *buffer, size_t *lenp,
2843*4882a593Smuzhiyun loff_t *ppos)
2844*4882a593Smuzhiyun {
2845*4882a593Smuzhiyun int save_tracepoint_printk;
2846*4882a593Smuzhiyun int ret;
2847*4882a593Smuzhiyun
2848*4882a593Smuzhiyun mutex_lock(&tracepoint_printk_mutex);
2849*4882a593Smuzhiyun save_tracepoint_printk = tracepoint_printk;
2850*4882a593Smuzhiyun
2851*4882a593Smuzhiyun ret = proc_dointvec(table, write, buffer, lenp, ppos);
2852*4882a593Smuzhiyun
2853*4882a593Smuzhiyun /*
2854*4882a593Smuzhiyun * This will force exiting early, as tracepoint_printk
2855*4882a593Smuzhiyun * is always zero when tracepoint_print_iter is not allocated.
2856*4882a593Smuzhiyun */
2857*4882a593Smuzhiyun if (!tracepoint_print_iter)
2858*4882a593Smuzhiyun tracepoint_printk = 0;
2859*4882a593Smuzhiyun
2860*4882a593Smuzhiyun if (save_tracepoint_printk == tracepoint_printk)
2861*4882a593Smuzhiyun goto out;
2862*4882a593Smuzhiyun
2863*4882a593Smuzhiyun if (tracepoint_printk)
2864*4882a593Smuzhiyun static_key_enable(&tracepoint_printk_key.key);
2865*4882a593Smuzhiyun else
2866*4882a593Smuzhiyun static_key_disable(&tracepoint_printk_key.key);
2867*4882a593Smuzhiyun
2868*4882a593Smuzhiyun out:
2869*4882a593Smuzhiyun mutex_unlock(&tracepoint_printk_mutex);
2870*4882a593Smuzhiyun
2871*4882a593Smuzhiyun return ret;
2872*4882a593Smuzhiyun }
2873*4882a593Smuzhiyun
2874*4882a593Smuzhiyun void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2875*4882a593Smuzhiyun {
2876*4882a593Smuzhiyun if (static_key_false(&tracepoint_printk_key.key))
2877*4882a593Smuzhiyun output_printk(fbuffer);
2878*4882a593Smuzhiyun
2879*4882a593Smuzhiyun if (static_branch_unlikely(&trace_event_exports_enabled))
2880*4882a593Smuzhiyun ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2881*4882a593Smuzhiyun event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2882*4882a593Smuzhiyun fbuffer->event, fbuffer->entry,
2883*4882a593Smuzhiyun fbuffer->flags, fbuffer->pc, fbuffer->regs);
2884*4882a593Smuzhiyun }
2885*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2886*4882a593Smuzhiyun
2887*4882a593Smuzhiyun /*
2888*4882a593Smuzhiyun * Skip 3:
2889*4882a593Smuzhiyun *
2890*4882a593Smuzhiyun * trace_buffer_unlock_commit_regs()
2891*4882a593Smuzhiyun * trace_event_buffer_commit()
2892*4882a593Smuzhiyun * trace_event_raw_event_xxx()
2893*4882a593Smuzhiyun */
2894*4882a593Smuzhiyun # define STACK_SKIP 3
2895*4882a593Smuzhiyun
2896*4882a593Smuzhiyun void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2897*4882a593Smuzhiyun struct trace_buffer *buffer,
2898*4882a593Smuzhiyun struct ring_buffer_event *event,
2899*4882a593Smuzhiyun unsigned long flags, int pc,
2900*4882a593Smuzhiyun struct pt_regs *regs)
2901*4882a593Smuzhiyun {
2902*4882a593Smuzhiyun __buffer_unlock_commit(buffer, event);
2903*4882a593Smuzhiyun
2904*4882a593Smuzhiyun /*
2905*4882a593Smuzhiyun * If regs is not set, then skip the necessary functions.
2906*4882a593Smuzhiyun * Note, we can still get here via blktrace, wakeup tracer
2907*4882a593Smuzhiyun * and mmiotrace, but that's ok if they lose a function or
2908*4882a593Smuzhiyun * two. They are not that meaningful.
2909*4882a593Smuzhiyun */
2910*4882a593Smuzhiyun ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2911*4882a593Smuzhiyun ftrace_trace_userstack(tr, buffer, flags, pc);
2912*4882a593Smuzhiyun }
2913*4882a593Smuzhiyun
2914*4882a593Smuzhiyun /*
2915*4882a593Smuzhiyun * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2916*4882a593Smuzhiyun */
2917*4882a593Smuzhiyun void
2918*4882a593Smuzhiyun trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2919*4882a593Smuzhiyun struct ring_buffer_event *event)
2920*4882a593Smuzhiyun {
2921*4882a593Smuzhiyun __buffer_unlock_commit(buffer, event);
2922*4882a593Smuzhiyun }
2923*4882a593Smuzhiyun
2924*4882a593Smuzhiyun void
2925*4882a593Smuzhiyun trace_function(struct trace_array *tr,
2926*4882a593Smuzhiyun unsigned long ip, unsigned long parent_ip, unsigned long flags,
2927*4882a593Smuzhiyun int pc)
2928*4882a593Smuzhiyun {
2929*4882a593Smuzhiyun struct trace_event_call *call = &event_function;
2930*4882a593Smuzhiyun struct trace_buffer *buffer = tr->array_buffer.buffer;
2931*4882a593Smuzhiyun struct ring_buffer_event *event;
2932*4882a593Smuzhiyun struct ftrace_entry *entry;
2933*4882a593Smuzhiyun
2934*4882a593Smuzhiyun event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2935*4882a593Smuzhiyun flags, pc);
2936*4882a593Smuzhiyun if (!event)
2937*4882a593Smuzhiyun return;
2938*4882a593Smuzhiyun entry = ring_buffer_event_data(event);
2939*4882a593Smuzhiyun entry->ip = ip;
2940*4882a593Smuzhiyun entry->parent_ip = parent_ip;
2941*4882a593Smuzhiyun
2942*4882a593Smuzhiyun if (!call_filter_check_discard(call, entry, buffer, event)) {
2943*4882a593Smuzhiyun if (static_branch_unlikely(&trace_function_exports_enabled))
2944*4882a593Smuzhiyun ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2945*4882a593Smuzhiyun __buffer_unlock_commit(buffer, event);
2946*4882a593Smuzhiyun }
2947*4882a593Smuzhiyun }
2948*4882a593Smuzhiyun
2949*4882a593Smuzhiyun #ifdef CONFIG_STACKTRACE
2950*4882a593Smuzhiyun
2951*4882a593Smuzhiyun /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2952*4882a593Smuzhiyun #define FTRACE_KSTACK_NESTING 4
2953*4882a593Smuzhiyun
2954*4882a593Smuzhiyun #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2955*4882a593Smuzhiyun
2956*4882a593Smuzhiyun struct ftrace_stack {
2957*4882a593Smuzhiyun unsigned long calls[FTRACE_KSTACK_ENTRIES];
2958*4882a593Smuzhiyun };
2959*4882a593Smuzhiyun
2960*4882a593Smuzhiyun
2961*4882a593Smuzhiyun struct ftrace_stacks {
2962*4882a593Smuzhiyun struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2963*4882a593Smuzhiyun };
2964*4882a593Smuzhiyun
2965*4882a593Smuzhiyun static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2966*4882a593Smuzhiyun static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2967*4882a593Smuzhiyun
2968*4882a593Smuzhiyun static void __ftrace_trace_stack(struct trace_buffer *buffer,
2969*4882a593Smuzhiyun unsigned long flags,
2970*4882a593Smuzhiyun int skip, int pc, struct pt_regs *regs)
2971*4882a593Smuzhiyun {
2972*4882a593Smuzhiyun struct trace_event_call *call = &event_kernel_stack;
2973*4882a593Smuzhiyun struct ring_buffer_event *event;
2974*4882a593Smuzhiyun unsigned int size, nr_entries;
2975*4882a593Smuzhiyun struct ftrace_stack *fstack;
2976*4882a593Smuzhiyun struct stack_entry *entry;
2977*4882a593Smuzhiyun int stackidx;
2978*4882a593Smuzhiyun
2979*4882a593Smuzhiyun /*
2980*4882a593Smuzhiyun * Add one, for this function and the call to stack_trace_save().
2981*4882a593Smuzhiyun * If regs is set, then these functions will not be in the way.
2982*4882a593Smuzhiyun */
2983*4882a593Smuzhiyun #ifndef CONFIG_UNWINDER_ORC
2984*4882a593Smuzhiyun if (!regs)
2985*4882a593Smuzhiyun skip++;
2986*4882a593Smuzhiyun #endif
2987*4882a593Smuzhiyun
2988*4882a593Smuzhiyun preempt_disable_notrace();
2989*4882a593Smuzhiyun
2990*4882a593Smuzhiyun stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2991*4882a593Smuzhiyun
2992*4882a593Smuzhiyun /* This should never happen. If it does, yell once and skip */
2993*4882a593Smuzhiyun if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2994*4882a593Smuzhiyun goto out;
2995*4882a593Smuzhiyun
2996*4882a593Smuzhiyun /*
2997*4882a593Smuzhiyun * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2998*4882a593Smuzhiyun * interrupt will either see the value pre increment or post
2999*4882a593Smuzhiyun * increment. If the interrupt happens pre increment it will have
3000*4882a593Smuzhiyun * restored the counter when it returns. We just need a barrier to
3001*4882a593Smuzhiyun * keep gcc from moving things around.
3002*4882a593Smuzhiyun */
3003*4882a593Smuzhiyun barrier();
3004*4882a593Smuzhiyun
3005*4882a593Smuzhiyun fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3006*4882a593Smuzhiyun size = ARRAY_SIZE(fstack->calls);
3007*4882a593Smuzhiyun
3008*4882a593Smuzhiyun if (regs) {
3009*4882a593Smuzhiyun nr_entries = stack_trace_save_regs(regs, fstack->calls,
3010*4882a593Smuzhiyun size, skip);
3011*4882a593Smuzhiyun } else {
3012*4882a593Smuzhiyun nr_entries = stack_trace_save(fstack->calls, size, skip);
3013*4882a593Smuzhiyun }
3014*4882a593Smuzhiyun
3015*4882a593Smuzhiyun size = nr_entries * sizeof(unsigned long);
3016*4882a593Smuzhiyun event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3017*4882a593Smuzhiyun (sizeof(*entry) - sizeof(entry->caller)) + size,
3018*4882a593Smuzhiyun flags, pc);
3019*4882a593Smuzhiyun if (!event)
3020*4882a593Smuzhiyun goto out;
3021*4882a593Smuzhiyun entry = ring_buffer_event_data(event);
3022*4882a593Smuzhiyun
3023*4882a593Smuzhiyun memcpy(&entry->caller, fstack->calls, size);
3024*4882a593Smuzhiyun entry->size = nr_entries;
3025*4882a593Smuzhiyun
3026*4882a593Smuzhiyun if (!call_filter_check_discard(call, entry, buffer, event))
3027*4882a593Smuzhiyun __buffer_unlock_commit(buffer, event);
3028*4882a593Smuzhiyun
3029*4882a593Smuzhiyun out:
3030*4882a593Smuzhiyun /* Again, don't let gcc optimize things here */
3031*4882a593Smuzhiyun barrier();
3032*4882a593Smuzhiyun __this_cpu_dec(ftrace_stack_reserve);
3033*4882a593Smuzhiyun preempt_enable_notrace();
3034*4882a593Smuzhiyun
3035*4882a593Smuzhiyun }
3036*4882a593Smuzhiyun
3037*4882a593Smuzhiyun static inline void ftrace_trace_stack(struct trace_array *tr,
3038*4882a593Smuzhiyun struct trace_buffer *buffer,
3039*4882a593Smuzhiyun unsigned long flags,
3040*4882a593Smuzhiyun int skip, int pc, struct pt_regs *regs)
3041*4882a593Smuzhiyun {
3042*4882a593Smuzhiyun if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3043*4882a593Smuzhiyun return;
3044*4882a593Smuzhiyun
3045*4882a593Smuzhiyun __ftrace_trace_stack(buffer, flags, skip, pc, regs);
3046*4882a593Smuzhiyun }
3047*4882a593Smuzhiyun
3048*4882a593Smuzhiyun void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
3049*4882a593Smuzhiyun int pc)
3050*4882a593Smuzhiyun {
3051*4882a593Smuzhiyun struct trace_buffer *buffer = tr->array_buffer.buffer;
3052*4882a593Smuzhiyun
3053*4882a593Smuzhiyun if (rcu_is_watching()) {
3054*4882a593Smuzhiyun __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3055*4882a593Smuzhiyun return;
3056*4882a593Smuzhiyun }
3057*4882a593Smuzhiyun
3058*4882a593Smuzhiyun /*
3059*4882a593Smuzhiyun * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3060*4882a593Smuzhiyun * but if the above rcu_is_watching() failed, then the NMI
3061*4882a593Smuzhiyun * triggered someplace critical, and rcu_irq_enter() should
3062*4882a593Smuzhiyun * not be called from NMI.
3063*4882a593Smuzhiyun */
3064*4882a593Smuzhiyun if (unlikely(in_nmi()))
3065*4882a593Smuzhiyun return;
3066*4882a593Smuzhiyun
3067*4882a593Smuzhiyun rcu_irq_enter_irqson();
3068*4882a593Smuzhiyun __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3069*4882a593Smuzhiyun rcu_irq_exit_irqson();
3070*4882a593Smuzhiyun }
3071*4882a593Smuzhiyun
3072*4882a593Smuzhiyun /**
3073*4882a593Smuzhiyun * trace_dump_stack - record a stack back trace in the trace buffer
3074*4882a593Smuzhiyun * @skip: Number of functions to skip (helper handlers)
3075*4882a593Smuzhiyun */
3076*4882a593Smuzhiyun void trace_dump_stack(int skip)
3077*4882a593Smuzhiyun {
3078*4882a593Smuzhiyun unsigned long flags;
3079*4882a593Smuzhiyun
3080*4882a593Smuzhiyun if (tracing_disabled || tracing_selftest_running)
3081*4882a593Smuzhiyun return;
3082*4882a593Smuzhiyun
3083*4882a593Smuzhiyun local_save_flags(flags);
3084*4882a593Smuzhiyun
3085*4882a593Smuzhiyun #ifndef CONFIG_UNWINDER_ORC
3086*4882a593Smuzhiyun /* Skip 1 to skip this function. */
3087*4882a593Smuzhiyun skip++;
3088*4882a593Smuzhiyun #endif
3089*4882a593Smuzhiyun __ftrace_trace_stack(global_trace.array_buffer.buffer,
3090*4882a593Smuzhiyun flags, skip, preempt_count(), NULL);
3091*4882a593Smuzhiyun }
3092*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_dump_stack);
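/*
 * Illustrative usage (sketch): drop a kernel stack trace into the trace
 * buffer from a suspect code path without stopping the system:
 *
 *	if (unlikely(bad_condition))	// hypothetical condition
 *		trace_dump_stack(0);
 */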
3093*4882a593Smuzhiyun
3094*4882a593Smuzhiyun #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3095*4882a593Smuzhiyun static DEFINE_PER_CPU(int, user_stack_count);
3096*4882a593Smuzhiyun
3097*4882a593Smuzhiyun static void
3098*4882a593Smuzhiyun ftrace_trace_userstack(struct trace_array *tr,
3099*4882a593Smuzhiyun struct trace_buffer *buffer, unsigned long flags, int pc)
3100*4882a593Smuzhiyun {
3101*4882a593Smuzhiyun struct trace_event_call *call = &event_user_stack;
3102*4882a593Smuzhiyun struct ring_buffer_event *event;
3103*4882a593Smuzhiyun struct userstack_entry *entry;
3104*4882a593Smuzhiyun
3105*4882a593Smuzhiyun if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3106*4882a593Smuzhiyun return;
3107*4882a593Smuzhiyun
3108*4882a593Smuzhiyun /*
3109*4882a593Smuzhiyun * NMIs cannot handle page faults, even with fixups.
3110*4882a593Smuzhiyun * Saving the user stack can (and often does) fault.
3111*4882a593Smuzhiyun */
3112*4882a593Smuzhiyun if (unlikely(in_nmi()))
3113*4882a593Smuzhiyun return;
3114*4882a593Smuzhiyun
3115*4882a593Smuzhiyun /*
3116*4882a593Smuzhiyun * prevent recursion, since the user stack tracing may
3117*4882a593Smuzhiyun * trigger other kernel events.
3118*4882a593Smuzhiyun */
3119*4882a593Smuzhiyun preempt_disable();
3120*4882a593Smuzhiyun if (__this_cpu_read(user_stack_count))
3121*4882a593Smuzhiyun goto out;
3122*4882a593Smuzhiyun
3123*4882a593Smuzhiyun __this_cpu_inc(user_stack_count);
3124*4882a593Smuzhiyun
3125*4882a593Smuzhiyun event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3126*4882a593Smuzhiyun sizeof(*entry), flags, pc);
3127*4882a593Smuzhiyun if (!event)
3128*4882a593Smuzhiyun goto out_drop_count;
3129*4882a593Smuzhiyun entry = ring_buffer_event_data(event);
3130*4882a593Smuzhiyun
3131*4882a593Smuzhiyun entry->tgid = current->tgid;
3132*4882a593Smuzhiyun memset(&entry->caller, 0, sizeof(entry->caller));
3133*4882a593Smuzhiyun
3134*4882a593Smuzhiyun stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3135*4882a593Smuzhiyun if (!call_filter_check_discard(call, entry, buffer, event))
3136*4882a593Smuzhiyun __buffer_unlock_commit(buffer, event);
3137*4882a593Smuzhiyun
3138*4882a593Smuzhiyun out_drop_count:
3139*4882a593Smuzhiyun __this_cpu_dec(user_stack_count);
3140*4882a593Smuzhiyun out:
3141*4882a593Smuzhiyun preempt_enable();
3142*4882a593Smuzhiyun }
3143*4882a593Smuzhiyun #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3144*4882a593Smuzhiyun static void ftrace_trace_userstack(struct trace_array *tr,
3145*4882a593Smuzhiyun struct trace_buffer *buffer,
3146*4882a593Smuzhiyun unsigned long flags, int pc)
3147*4882a593Smuzhiyun {
3148*4882a593Smuzhiyun }
3149*4882a593Smuzhiyun #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3150*4882a593Smuzhiyun
3151*4882a593Smuzhiyun #endif /* CONFIG_STACKTRACE */
3152*4882a593Smuzhiyun
3153*4882a593Smuzhiyun /* created for use with alloc_percpu */
3154*4882a593Smuzhiyun struct trace_buffer_struct {
3155*4882a593Smuzhiyun int nesting;
3156*4882a593Smuzhiyun char buffer[4][TRACE_BUF_SIZE];
3157*4882a593Smuzhiyun };
3158*4882a593Smuzhiyun
3159*4882a593Smuzhiyun static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3160*4882a593Smuzhiyun
3161*4882a593Smuzhiyun /*
3162*4882a593Smuzhiyun * This allows for lockless recording. If we're nested too deeply, then
3163*4882a593Smuzhiyun * this returns NULL.
3164*4882a593Smuzhiyun */
3165*4882a593Smuzhiyun static char *get_trace_buf(void)
3166*4882a593Smuzhiyun {
3167*4882a593Smuzhiyun struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3168*4882a593Smuzhiyun
3169*4882a593Smuzhiyun if (!trace_percpu_buffer || buffer->nesting >= 4)
3170*4882a593Smuzhiyun return NULL;
3171*4882a593Smuzhiyun
3172*4882a593Smuzhiyun buffer->nesting++;
3173*4882a593Smuzhiyun
3174*4882a593Smuzhiyun /* Interrupts must see nesting incremented before we use the buffer */
3175*4882a593Smuzhiyun barrier();
3176*4882a593Smuzhiyun return &buffer->buffer[buffer->nesting - 1][0];
3177*4882a593Smuzhiyun }
3178*4882a593Smuzhiyun
3179*4882a593Smuzhiyun static void put_trace_buf(void)
3180*4882a593Smuzhiyun {
3181*4882a593Smuzhiyun /* Don't let the decrement of nesting leak before this */
3182*4882a593Smuzhiyun barrier();
3183*4882a593Smuzhiyun this_cpu_dec(trace_percpu_buffer->nesting);
3184*4882a593Smuzhiyun }
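/*
 * Illustrative pairing (sketch): users of the per-cpu printk buffers
 * keep preemption disabled across the get/put so the nesting counter
 * stays on one CPU, mirroring the pattern in trace_vbprintk() below:
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf) {
 *		// ... format into buf, at most TRACE_BUF_SIZE bytes ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */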
3185*4882a593Smuzhiyun
3186*4882a593Smuzhiyun static int alloc_percpu_trace_buffer(void)
3187*4882a593Smuzhiyun {
3188*4882a593Smuzhiyun struct trace_buffer_struct __percpu *buffers;
3189*4882a593Smuzhiyun
3190*4882a593Smuzhiyun if (trace_percpu_buffer)
3191*4882a593Smuzhiyun return 0;
3192*4882a593Smuzhiyun
3193*4882a593Smuzhiyun buffers = alloc_percpu(struct trace_buffer_struct);
3194*4882a593Smuzhiyun if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3195*4882a593Smuzhiyun return -ENOMEM;
3196*4882a593Smuzhiyun
3197*4882a593Smuzhiyun trace_percpu_buffer = buffers;
3198*4882a593Smuzhiyun return 0;
3199*4882a593Smuzhiyun }
3200*4882a593Smuzhiyun
3201*4882a593Smuzhiyun static int buffers_allocated;
3202*4882a593Smuzhiyun
3203*4882a593Smuzhiyun void trace_printk_init_buffers(void)
3204*4882a593Smuzhiyun {
3205*4882a593Smuzhiyun if (buffers_allocated)
3206*4882a593Smuzhiyun return;
3207*4882a593Smuzhiyun
3208*4882a593Smuzhiyun if (alloc_percpu_trace_buffer())
3209*4882a593Smuzhiyun return;
3210*4882a593Smuzhiyun
3211*4882a593Smuzhiyun /* trace_printk() is for debug use only. Don't use it in production. */
3212*4882a593Smuzhiyun
3213*4882a593Smuzhiyun pr_warn("\n");
3214*4882a593Smuzhiyun pr_warn("**********************************************************\n");
3215*4882a593Smuzhiyun pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3216*4882a593Smuzhiyun pr_warn("** **\n");
3217*4882a593Smuzhiyun pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3218*4882a593Smuzhiyun pr_warn("** **\n");
3219*4882a593Smuzhiyun pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3220*4882a593Smuzhiyun pr_warn("** unsafe for production use. **\n");
3221*4882a593Smuzhiyun pr_warn("** **\n");
3222*4882a593Smuzhiyun pr_warn("** If you see this message and you are not debugging **\n");
3223*4882a593Smuzhiyun pr_warn("** the kernel, report this immediately to your vendor! **\n");
3224*4882a593Smuzhiyun pr_warn("** **\n");
3225*4882a593Smuzhiyun pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3226*4882a593Smuzhiyun pr_warn("**********************************************************\n");
3227*4882a593Smuzhiyun
3228*4882a593Smuzhiyun /* Expand the buffers to set size */
3229*4882a593Smuzhiyun tracing_update_buffers();
3230*4882a593Smuzhiyun
3231*4882a593Smuzhiyun buffers_allocated = 1;
3232*4882a593Smuzhiyun
3233*4882a593Smuzhiyun /*
3234*4882a593Smuzhiyun * trace_printk_init_buffers() can be called by modules.
3235*4882a593Smuzhiyun * If that happens, then we need to start cmdline recording
3236*4882a593Smuzhiyun * directly here. If the global_trace.buffer is already
3237*4882a593Smuzhiyun * allocated here, then this was called by module code.
3238*4882a593Smuzhiyun */
3239*4882a593Smuzhiyun if (global_trace.array_buffer.buffer)
3240*4882a593Smuzhiyun tracing_start_cmdline_record();
3241*4882a593Smuzhiyun }
3242*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3243*4882a593Smuzhiyun
3244*4882a593Smuzhiyun void trace_printk_start_comm(void)
3245*4882a593Smuzhiyun {
3246*4882a593Smuzhiyun /* Start tracing comms if trace printk is set */
3247*4882a593Smuzhiyun if (!buffers_allocated)
3248*4882a593Smuzhiyun return;
3249*4882a593Smuzhiyun tracing_start_cmdline_record();
3250*4882a593Smuzhiyun }
3251*4882a593Smuzhiyun
3252*4882a593Smuzhiyun static void trace_printk_start_stop_comm(int enabled)
3253*4882a593Smuzhiyun {
3254*4882a593Smuzhiyun if (!buffers_allocated)
3255*4882a593Smuzhiyun return;
3256*4882a593Smuzhiyun
3257*4882a593Smuzhiyun if (enabled)
3258*4882a593Smuzhiyun tracing_start_cmdline_record();
3259*4882a593Smuzhiyun else
3260*4882a593Smuzhiyun tracing_stop_cmdline_record();
3261*4882a593Smuzhiyun }
3262*4882a593Smuzhiyun
3263*4882a593Smuzhiyun /**
3264*4882a593Smuzhiyun * trace_vbprintk - write binary msg to tracing buffer
3265*4882a593Smuzhiyun * @ip: The address of the caller
3266*4882a593Smuzhiyun * @fmt: The string format to write to the buffer
3267*4882a593Smuzhiyun * @args: Arguments for @fmt
3268*4882a593Smuzhiyun */
3269*4882a593Smuzhiyun int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3270*4882a593Smuzhiyun {
3271*4882a593Smuzhiyun struct trace_event_call *call = &event_bprint;
3272*4882a593Smuzhiyun struct ring_buffer_event *event;
3273*4882a593Smuzhiyun struct trace_buffer *buffer;
3274*4882a593Smuzhiyun struct trace_array *tr = &global_trace;
3275*4882a593Smuzhiyun struct bprint_entry *entry;
3276*4882a593Smuzhiyun unsigned long flags;
3277*4882a593Smuzhiyun char *tbuffer;
3278*4882a593Smuzhiyun int len = 0, size, pc;
3279*4882a593Smuzhiyun
3280*4882a593Smuzhiyun if (unlikely(tracing_selftest_running || tracing_disabled))
3281*4882a593Smuzhiyun return 0;
3282*4882a593Smuzhiyun
3283*4882a593Smuzhiyun /* Don't pollute graph traces with trace_vprintk internals */
3284*4882a593Smuzhiyun pause_graph_tracing();
3285*4882a593Smuzhiyun
3286*4882a593Smuzhiyun pc = preempt_count();
3287*4882a593Smuzhiyun preempt_disable_notrace();
3288*4882a593Smuzhiyun
3289*4882a593Smuzhiyun tbuffer = get_trace_buf();
3290*4882a593Smuzhiyun if (!tbuffer) {
3291*4882a593Smuzhiyun len = 0;
3292*4882a593Smuzhiyun goto out_nobuffer;
3293*4882a593Smuzhiyun }
3294*4882a593Smuzhiyun
3295*4882a593Smuzhiyun len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3296*4882a593Smuzhiyun
3297*4882a593Smuzhiyun if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3298*4882a593Smuzhiyun goto out_put;
3299*4882a593Smuzhiyun
3300*4882a593Smuzhiyun local_save_flags(flags);
3301*4882a593Smuzhiyun size = sizeof(*entry) + sizeof(u32) * len;
3302*4882a593Smuzhiyun buffer = tr->array_buffer.buffer;
3303*4882a593Smuzhiyun ring_buffer_nest_start(buffer);
3304*4882a593Smuzhiyun event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3305*4882a593Smuzhiyun flags, pc);
3306*4882a593Smuzhiyun if (!event)
3307*4882a593Smuzhiyun goto out;
3308*4882a593Smuzhiyun entry = ring_buffer_event_data(event);
3309*4882a593Smuzhiyun entry->ip = ip;
3310*4882a593Smuzhiyun entry->fmt = fmt;
3311*4882a593Smuzhiyun
3312*4882a593Smuzhiyun memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3313*4882a593Smuzhiyun if (!call_filter_check_discard(call, entry, buffer, event)) {
3314*4882a593Smuzhiyun __buffer_unlock_commit(buffer, event);
3315*4882a593Smuzhiyun ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3316*4882a593Smuzhiyun }
3317*4882a593Smuzhiyun
3318*4882a593Smuzhiyun out:
3319*4882a593Smuzhiyun ring_buffer_nest_end(buffer);
3320*4882a593Smuzhiyun out_put:
3321*4882a593Smuzhiyun put_trace_buf();
3322*4882a593Smuzhiyun
3323*4882a593Smuzhiyun out_nobuffer:
3324*4882a593Smuzhiyun preempt_enable_notrace();
3325*4882a593Smuzhiyun unpause_graph_tracing();
3326*4882a593Smuzhiyun
3327*4882a593Smuzhiyun return len;
3328*4882a593Smuzhiyun }
3329*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_vbprintk);
3330*4882a593Smuzhiyun
3331*4882a593Smuzhiyun __printf(3, 0)
3332*4882a593Smuzhiyun static int
3333*4882a593Smuzhiyun __trace_array_vprintk(struct trace_buffer *buffer,
3334*4882a593Smuzhiyun unsigned long ip, const char *fmt, va_list args)
3335*4882a593Smuzhiyun {
3336*4882a593Smuzhiyun struct trace_event_call *call = &event_print;
3337*4882a593Smuzhiyun struct ring_buffer_event *event;
3338*4882a593Smuzhiyun int len = 0, size, pc;
3339*4882a593Smuzhiyun struct print_entry *entry;
3340*4882a593Smuzhiyun unsigned long flags;
3341*4882a593Smuzhiyun char *tbuffer;
3342*4882a593Smuzhiyun
3343*4882a593Smuzhiyun if (tracing_disabled || tracing_selftest_running)
3344*4882a593Smuzhiyun return 0;
3345*4882a593Smuzhiyun
3346*4882a593Smuzhiyun /* Don't pollute graph traces with trace_vprintk internals */
3347*4882a593Smuzhiyun pause_graph_tracing();
3348*4882a593Smuzhiyun
3349*4882a593Smuzhiyun pc = preempt_count();
3350*4882a593Smuzhiyun preempt_disable_notrace();
3351*4882a593Smuzhiyun
3353*4882a593Smuzhiyun tbuffer = get_trace_buf();
3354*4882a593Smuzhiyun if (!tbuffer) {
3355*4882a593Smuzhiyun len = 0;
3356*4882a593Smuzhiyun goto out_nobuffer;
3357*4882a593Smuzhiyun }
3358*4882a593Smuzhiyun
3359*4882a593Smuzhiyun len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3360*4882a593Smuzhiyun
3361*4882a593Smuzhiyun local_save_flags(flags);
3362*4882a593Smuzhiyun size = sizeof(*entry) + len + 1;
3363*4882a593Smuzhiyun ring_buffer_nest_start(buffer);
3364*4882a593Smuzhiyun event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3365*4882a593Smuzhiyun flags, pc);
3366*4882a593Smuzhiyun if (!event)
3367*4882a593Smuzhiyun goto out;
3368*4882a593Smuzhiyun entry = ring_buffer_event_data(event);
3369*4882a593Smuzhiyun entry->ip = ip;
3370*4882a593Smuzhiyun
3371*4882a593Smuzhiyun memcpy(&entry->buf, tbuffer, len + 1);
3372*4882a593Smuzhiyun if (!call_filter_check_discard(call, entry, buffer, event)) {
3373*4882a593Smuzhiyun __buffer_unlock_commit(buffer, event);
3374*4882a593Smuzhiyun ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3375*4882a593Smuzhiyun }
3376*4882a593Smuzhiyun
3377*4882a593Smuzhiyun out:
3378*4882a593Smuzhiyun ring_buffer_nest_end(buffer);
3379*4882a593Smuzhiyun put_trace_buf();
3380*4882a593Smuzhiyun
3381*4882a593Smuzhiyun out_nobuffer:
3382*4882a593Smuzhiyun preempt_enable_notrace();
3383*4882a593Smuzhiyun unpause_graph_tracing();
3384*4882a593Smuzhiyun
3385*4882a593Smuzhiyun return len;
3386*4882a593Smuzhiyun }
3387*4882a593Smuzhiyun
3388*4882a593Smuzhiyun __printf(3, 0)
3389*4882a593Smuzhiyun int trace_array_vprintk(struct trace_array *tr,
3390*4882a593Smuzhiyun unsigned long ip, const char *fmt, va_list args)
3391*4882a593Smuzhiyun {
3392*4882a593Smuzhiyun return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3393*4882a593Smuzhiyun }
3394*4882a593Smuzhiyun
3395*4882a593Smuzhiyun /**
3396*4882a593Smuzhiyun * trace_array_printk - Print a message to a specific instance
3397*4882a593Smuzhiyun * @tr: The instance trace_array descriptor
3398*4882a593Smuzhiyun * @ip: The instruction pointer that this is called from.
3399*4882a593Smuzhiyun * @fmt: The format to print (printf format)
3400*4882a593Smuzhiyun *
3401*4882a593Smuzhiyun * If a subsystem sets up its own instance, it has the right to
3402*4882a593Smuzhiyun * printk strings into its tracing instance buffer using this
3403*4882a593Smuzhiyun * function. Note, this function will not write into the top level
3404*4882a593Smuzhiyun * buffer (use trace_printk() for that), as the top level buffer
3405*4882a593Smuzhiyun * should only contain events that can be individually disabled.
3406*4882a593Smuzhiyun * trace_printk() is only meant for debugging a kernel, and should
3407*4882a593Smuzhiyun * never be incorporated into normal use.
3408*4882a593Smuzhiyun *
3409*4882a593Smuzhiyun * trace_array_printk() can be used, as it will not add noise to the
3410*4882a593Smuzhiyun * top level tracing buffer.
3411*4882a593Smuzhiyun *
3412*4882a593Smuzhiyun * Note, trace_array_init_printk() must be called on @tr before this
3413*4882a593Smuzhiyun * can be used.
3414*4882a593Smuzhiyun */
3415*4882a593Smuzhiyun __printf(3, 0)
3416*4882a593Smuzhiyun int trace_array_printk(struct trace_array *tr,
3417*4882a593Smuzhiyun unsigned long ip, const char *fmt, ...)
3418*4882a593Smuzhiyun {
3419*4882a593Smuzhiyun int ret;
3420*4882a593Smuzhiyun va_list ap;
3421*4882a593Smuzhiyun
3422*4882a593Smuzhiyun if (!tr)
3423*4882a593Smuzhiyun return -ENOENT;
3424*4882a593Smuzhiyun
3425*4882a593Smuzhiyun /* This is only allowed for created instances */
3426*4882a593Smuzhiyun if (tr == &global_trace)
3427*4882a593Smuzhiyun return 0;
3428*4882a593Smuzhiyun
3429*4882a593Smuzhiyun if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3430*4882a593Smuzhiyun return 0;
3431*4882a593Smuzhiyun
3432*4882a593Smuzhiyun va_start(ap, fmt);
3433*4882a593Smuzhiyun ret = trace_array_vprintk(tr, ip, fmt, ap);
3434*4882a593Smuzhiyun va_end(ap);
3435*4882a593Smuzhiyun return ret;
3436*4882a593Smuzhiyun }
3437*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_array_printk);
3438*4882a593Smuzhiyun
3439*4882a593Smuzhiyun /**
3440*4882a593Smuzhiyun * trace_array_init_printk - Initialize buffers for trace_array_printk()
3441*4882a593Smuzhiyun * @tr: The trace array to initialize the buffers for
3442*4882a593Smuzhiyun *
3443*4882a593Smuzhiyun * As trace_array_printk() only writes into instances, such calls are OK
3444*4882a593Smuzhiyun * to have in the kernel (unlike trace_printk()). This needs to be called
3445*4882a593Smuzhiyun * before trace_array_printk() can be used on a trace_array.
3446*4882a593Smuzhiyun */
3447*4882a593Smuzhiyun int trace_array_init_printk(struct trace_array *tr)
3448*4882a593Smuzhiyun {
3449*4882a593Smuzhiyun if (!tr)
3450*4882a593Smuzhiyun return -ENOENT;
3451*4882a593Smuzhiyun
3452*4882a593Smuzhiyun /* This is only allowed for created instances */
3453*4882a593Smuzhiyun if (tr == &global_trace)
3454*4882a593Smuzhiyun return -EINVAL;
3455*4882a593Smuzhiyun
3456*4882a593Smuzhiyun return alloc_percpu_trace_buffer();
3457*4882a593Smuzhiyun }
3458*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_array_init_printk);
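/*
 * Illustrative usage (sketch, assuming an instance obtained via
 * trace_array_get_by_name(); the instance name is hypothetical): a
 * subsystem prepares its own instance once and can then print into it
 * without touching the top level buffer:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_subsys");	// hypothetical name
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello %d\n", 1);
 */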
3459*4882a593Smuzhiyun
3460*4882a593Smuzhiyun __printf(3, 4)
3461*4882a593Smuzhiyun int trace_array_printk_buf(struct trace_buffer *buffer,
3462*4882a593Smuzhiyun unsigned long ip, const char *fmt, ...)
3463*4882a593Smuzhiyun {
3464*4882a593Smuzhiyun int ret;
3465*4882a593Smuzhiyun va_list ap;
3466*4882a593Smuzhiyun
3467*4882a593Smuzhiyun if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3468*4882a593Smuzhiyun return 0;
3469*4882a593Smuzhiyun
3470*4882a593Smuzhiyun va_start(ap, fmt);
3471*4882a593Smuzhiyun ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3472*4882a593Smuzhiyun va_end(ap);
3473*4882a593Smuzhiyun return ret;
3474*4882a593Smuzhiyun }
3475*4882a593Smuzhiyun
3476*4882a593Smuzhiyun __printf(2, 0)
3477*4882a593Smuzhiyun int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3478*4882a593Smuzhiyun {
3479*4882a593Smuzhiyun return trace_array_vprintk(&global_trace, ip, fmt, args);
3480*4882a593Smuzhiyun }
3481*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_vprintk);
3482*4882a593Smuzhiyun
3483*4882a593Smuzhiyun static void trace_iterator_increment(struct trace_iterator *iter)
3484*4882a593Smuzhiyun {
3485*4882a593Smuzhiyun struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3486*4882a593Smuzhiyun
3487*4882a593Smuzhiyun iter->idx++;
3488*4882a593Smuzhiyun if (buf_iter)
3489*4882a593Smuzhiyun ring_buffer_iter_advance(buf_iter);
3490*4882a593Smuzhiyun }
3491*4882a593Smuzhiyun
3492*4882a593Smuzhiyun static struct trace_entry *
3493*4882a593Smuzhiyun peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3494*4882a593Smuzhiyun unsigned long *lost_events)
3495*4882a593Smuzhiyun {
3496*4882a593Smuzhiyun struct ring_buffer_event *event;
3497*4882a593Smuzhiyun struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3498*4882a593Smuzhiyun
3499*4882a593Smuzhiyun if (buf_iter) {
3500*4882a593Smuzhiyun event = ring_buffer_iter_peek(buf_iter, ts);
3501*4882a593Smuzhiyun if (lost_events)
3502*4882a593Smuzhiyun *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3503*4882a593Smuzhiyun (unsigned long)-1 : 0;
3504*4882a593Smuzhiyun } else {
3505*4882a593Smuzhiyun event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3506*4882a593Smuzhiyun lost_events);
3507*4882a593Smuzhiyun }
3508*4882a593Smuzhiyun
3509*4882a593Smuzhiyun if (event) {
3510*4882a593Smuzhiyun iter->ent_size = ring_buffer_event_length(event);
3511*4882a593Smuzhiyun return ring_buffer_event_data(event);
3512*4882a593Smuzhiyun }
3513*4882a593Smuzhiyun iter->ent_size = 0;
3514*4882a593Smuzhiyun return NULL;
3515*4882a593Smuzhiyun }
3516*4882a593Smuzhiyun
3517*4882a593Smuzhiyun static struct trace_entry *
3518*4882a593Smuzhiyun __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3519*4882a593Smuzhiyun unsigned long *missing_events, u64 *ent_ts)
3520*4882a593Smuzhiyun {
3521*4882a593Smuzhiyun struct trace_buffer *buffer = iter->array_buffer->buffer;
3522*4882a593Smuzhiyun struct trace_entry *ent, *next = NULL;
3523*4882a593Smuzhiyun unsigned long lost_events = 0, next_lost = 0;
3524*4882a593Smuzhiyun int cpu_file = iter->cpu_file;
3525*4882a593Smuzhiyun u64 next_ts = 0, ts;
3526*4882a593Smuzhiyun int next_cpu = -1;
3527*4882a593Smuzhiyun int next_size = 0;
3528*4882a593Smuzhiyun int cpu;
3529*4882a593Smuzhiyun
3530*4882a593Smuzhiyun /*
3531*4882a593Smuzhiyun * If we are in a per_cpu trace file, don't bother iterating over
3532*4882a593Smuzhiyun * all CPUs; just peek directly at that one CPU.
3533*4882a593Smuzhiyun */
3534*4882a593Smuzhiyun if (cpu_file > RING_BUFFER_ALL_CPUS) {
3535*4882a593Smuzhiyun if (ring_buffer_empty_cpu(buffer, cpu_file))
3536*4882a593Smuzhiyun return NULL;
3537*4882a593Smuzhiyun ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3538*4882a593Smuzhiyun if (ent_cpu)
3539*4882a593Smuzhiyun *ent_cpu = cpu_file;
3540*4882a593Smuzhiyun
3541*4882a593Smuzhiyun return ent;
3542*4882a593Smuzhiyun }
3543*4882a593Smuzhiyun
3544*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
3545*4882a593Smuzhiyun
3546*4882a593Smuzhiyun if (ring_buffer_empty_cpu(buffer, cpu))
3547*4882a593Smuzhiyun continue;
3548*4882a593Smuzhiyun
3549*4882a593Smuzhiyun ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3550*4882a593Smuzhiyun
3551*4882a593Smuzhiyun /*
3552*4882a593Smuzhiyun * Pick the entry with the smallest timestamp:
3553*4882a593Smuzhiyun */
3554*4882a593Smuzhiyun if (ent && (!next || ts < next_ts)) {
3555*4882a593Smuzhiyun next = ent;
3556*4882a593Smuzhiyun next_cpu = cpu;
3557*4882a593Smuzhiyun next_ts = ts;
3558*4882a593Smuzhiyun next_lost = lost_events;
3559*4882a593Smuzhiyun next_size = iter->ent_size;
3560*4882a593Smuzhiyun }
3561*4882a593Smuzhiyun }
3562*4882a593Smuzhiyun
3563*4882a593Smuzhiyun iter->ent_size = next_size;
3564*4882a593Smuzhiyun
3565*4882a593Smuzhiyun if (ent_cpu)
3566*4882a593Smuzhiyun *ent_cpu = next_cpu;
3567*4882a593Smuzhiyun
3568*4882a593Smuzhiyun if (ent_ts)
3569*4882a593Smuzhiyun *ent_ts = next_ts;
3570*4882a593Smuzhiyun
3571*4882a593Smuzhiyun if (missing_events)
3572*4882a593Smuzhiyun *missing_events = next_lost;
3573*4882a593Smuzhiyun
3574*4882a593Smuzhiyun return next;
3575*4882a593Smuzhiyun }
3576*4882a593Smuzhiyun
3577*4882a593Smuzhiyun #define STATIC_TEMP_BUF_SIZE 128
3578*4882a593Smuzhiyun static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3579*4882a593Smuzhiyun
3580*4882a593Smuzhiyun /* Find the next real entry, without updating the iterator itself */
3581*4882a593Smuzhiyun struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3582*4882a593Smuzhiyun int *ent_cpu, u64 *ent_ts)
3583*4882a593Smuzhiyun {
3584*4882a593Smuzhiyun /* __find_next_entry will reset ent_size */
3585*4882a593Smuzhiyun int ent_size = iter->ent_size;
3586*4882a593Smuzhiyun struct trace_entry *entry;
3587*4882a593Smuzhiyun
3588*4882a593Smuzhiyun /*
3589*4882a593Smuzhiyun * If called from ftrace_dump(), then the iter->temp buffer
3590*4882a593Smuzhiyun * will be the static_temp_buf and not created from kmalloc.
3591*4882a593Smuzhiyun * If the entry size is greater than the buffer, we cannot
3592*4882a593Smuzhiyun * save it. Just return NULL in that case. This is only
3593*4882a593Smuzhiyun * used to add markers when two consecutive events' time
3594*4882a593Smuzhiyun * stamps have a large delta. See trace_print_lat_context().
3595*4882a593Smuzhiyun */
3596*4882a593Smuzhiyun if (iter->temp == static_temp_buf &&
3597*4882a593Smuzhiyun STATIC_TEMP_BUF_SIZE < ent_size)
3598*4882a593Smuzhiyun return NULL;
3599*4882a593Smuzhiyun
3600*4882a593Smuzhiyun /*
3601*4882a593Smuzhiyun * The __find_next_entry() may call peek_next_entry(), which may
3602*4882a593Smuzhiyun * call ring_buffer_peek() that may make the contents of iter->ent
3603*4882a593Smuzhiyun * undefined. Need to copy iter->ent now.
3604*4882a593Smuzhiyun */
3605*4882a593Smuzhiyun if (iter->ent && iter->ent != iter->temp) {
3606*4882a593Smuzhiyun if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3607*4882a593Smuzhiyun !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3608*4882a593Smuzhiyun void *temp;
3609*4882a593Smuzhiyun temp = kmalloc(iter->ent_size, GFP_KERNEL);
3610*4882a593Smuzhiyun if (!temp)
3611*4882a593Smuzhiyun return NULL;
3612*4882a593Smuzhiyun kfree(iter->temp);
3613*4882a593Smuzhiyun iter->temp = temp;
3614*4882a593Smuzhiyun iter->temp_size = iter->ent_size;
3615*4882a593Smuzhiyun }
3616*4882a593Smuzhiyun memcpy(iter->temp, iter->ent, iter->ent_size);
3617*4882a593Smuzhiyun iter->ent = iter->temp;
3618*4882a593Smuzhiyun }
3619*4882a593Smuzhiyun entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3620*4882a593Smuzhiyun /* Put back the original ent_size */
3621*4882a593Smuzhiyun iter->ent_size = ent_size;
3622*4882a593Smuzhiyun
3623*4882a593Smuzhiyun return entry;
3624*4882a593Smuzhiyun }
3625*4882a593Smuzhiyun
3626*4882a593Smuzhiyun /* Find the next real entry, and increment the iterator to the next entry */
3627*4882a593Smuzhiyun void *trace_find_next_entry_inc(struct trace_iterator *iter)
3628*4882a593Smuzhiyun {
3629*4882a593Smuzhiyun iter->ent = __find_next_entry(iter, &iter->cpu,
3630*4882a593Smuzhiyun &iter->lost_events, &iter->ts);
3631*4882a593Smuzhiyun
3632*4882a593Smuzhiyun if (iter->ent)
3633*4882a593Smuzhiyun trace_iterator_increment(iter);
3634*4882a593Smuzhiyun
3635*4882a593Smuzhiyun return iter->ent ? iter : NULL;
3636*4882a593Smuzhiyun }
3637*4882a593Smuzhiyun
3638*4882a593Smuzhiyun static void trace_consume(struct trace_iterator *iter)
3639*4882a593Smuzhiyun {
3640*4882a593Smuzhiyun ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3641*4882a593Smuzhiyun &iter->lost_events);
3642*4882a593Smuzhiyun }
3643*4882a593Smuzhiyun
3644*4882a593Smuzhiyun static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3645*4882a593Smuzhiyun {
3646*4882a593Smuzhiyun struct trace_iterator *iter = m->private;
3647*4882a593Smuzhiyun int i = (int)*pos;
3648*4882a593Smuzhiyun void *ent;
3649*4882a593Smuzhiyun
3650*4882a593Smuzhiyun WARN_ON_ONCE(iter->leftover);
3651*4882a593Smuzhiyun
3652*4882a593Smuzhiyun (*pos)++;
3653*4882a593Smuzhiyun
3654*4882a593Smuzhiyun /* can't go backwards */
3655*4882a593Smuzhiyun if (iter->idx > i)
3656*4882a593Smuzhiyun return NULL;
3657*4882a593Smuzhiyun
3658*4882a593Smuzhiyun if (iter->idx < 0)
3659*4882a593Smuzhiyun ent = trace_find_next_entry_inc(iter);
3660*4882a593Smuzhiyun else
3661*4882a593Smuzhiyun ent = iter;
3662*4882a593Smuzhiyun
3663*4882a593Smuzhiyun while (ent && iter->idx < i)
3664*4882a593Smuzhiyun ent = trace_find_next_entry_inc(iter);
3665*4882a593Smuzhiyun
3666*4882a593Smuzhiyun iter->pos = *pos;
3667*4882a593Smuzhiyun
3668*4882a593Smuzhiyun return ent;
3669*4882a593Smuzhiyun }
3670*4882a593Smuzhiyun
3671*4882a593Smuzhiyun void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3672*4882a593Smuzhiyun {
3673*4882a593Smuzhiyun struct ring_buffer_iter *buf_iter;
3674*4882a593Smuzhiyun unsigned long entries = 0;
3675*4882a593Smuzhiyun u64 ts;
3676*4882a593Smuzhiyun
3677*4882a593Smuzhiyun per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3678*4882a593Smuzhiyun
3679*4882a593Smuzhiyun buf_iter = trace_buffer_iter(iter, cpu);
3680*4882a593Smuzhiyun if (!buf_iter)
3681*4882a593Smuzhiyun return;
3682*4882a593Smuzhiyun
3683*4882a593Smuzhiyun ring_buffer_iter_reset(buf_iter);
3684*4882a593Smuzhiyun
3685*4882a593Smuzhiyun /*
3686*4882a593Smuzhiyun * With the max latency tracers, we could have the case
3687*4882a593Smuzhiyun * that a reset never took place on a cpu. This is evidenced
3688*4882a593Smuzhiyun * by the timestamp being before the start of the buffer.
3689*4882a593Smuzhiyun */
3690*4882a593Smuzhiyun while (ring_buffer_iter_peek(buf_iter, &ts)) {
3691*4882a593Smuzhiyun if (ts >= iter->array_buffer->time_start)
3692*4882a593Smuzhiyun break;
3693*4882a593Smuzhiyun entries++;
3694*4882a593Smuzhiyun ring_buffer_iter_advance(buf_iter);
3695*4882a593Smuzhiyun }
3696*4882a593Smuzhiyun
3697*4882a593Smuzhiyun per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3698*4882a593Smuzhiyun }
3699*4882a593Smuzhiyun
3700*4882a593Smuzhiyun /*
3701*4882a593Smuzhiyun * The current tracer is copied to avoid taking a global lock
3702*4882a593Smuzhiyun * all around.
3703*4882a593Smuzhiyun */
3704*4882a593Smuzhiyun static void *s_start(struct seq_file *m, loff_t *pos)
3705*4882a593Smuzhiyun {
3706*4882a593Smuzhiyun struct trace_iterator *iter = m->private;
3707*4882a593Smuzhiyun struct trace_array *tr = iter->tr;
3708*4882a593Smuzhiyun int cpu_file = iter->cpu_file;
3709*4882a593Smuzhiyun void *p = NULL;
3710*4882a593Smuzhiyun loff_t l = 0;
3711*4882a593Smuzhiyun int cpu;
3712*4882a593Smuzhiyun
3713*4882a593Smuzhiyun /*
3714*4882a593Smuzhiyun * Copy the tracer to avoid using a global lock all around.
3715*4882a593Smuzhiyun * iter->trace is a copy of current_trace; the pointer to its
3716*4882a593Smuzhiyun * name may be compared instead of using strcmp(), as iter->trace->name
3717*4882a593Smuzhiyun * will point to the same string as current_trace->name.
3718*4882a593Smuzhiyun */
3719*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
3720*4882a593Smuzhiyun if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3721*4882a593Smuzhiyun *iter->trace = *tr->current_trace;
3722*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
3723*4882a593Smuzhiyun
3724*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
3725*4882a593Smuzhiyun if (iter->snapshot && iter->trace->use_max_tr)
3726*4882a593Smuzhiyun return ERR_PTR(-EBUSY);
3727*4882a593Smuzhiyun #endif
3728*4882a593Smuzhiyun
3729*4882a593Smuzhiyun if (*pos != iter->pos) {
3730*4882a593Smuzhiyun iter->ent = NULL;
3731*4882a593Smuzhiyun iter->cpu = 0;
3732*4882a593Smuzhiyun iter->idx = -1;
3733*4882a593Smuzhiyun
3734*4882a593Smuzhiyun if (cpu_file == RING_BUFFER_ALL_CPUS) {
3735*4882a593Smuzhiyun for_each_tracing_cpu(cpu)
3736*4882a593Smuzhiyun tracing_iter_reset(iter, cpu);
3737*4882a593Smuzhiyun } else
3738*4882a593Smuzhiyun tracing_iter_reset(iter, cpu_file);
3739*4882a593Smuzhiyun
3740*4882a593Smuzhiyun iter->leftover = 0;
3741*4882a593Smuzhiyun for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3742*4882a593Smuzhiyun ;
3743*4882a593Smuzhiyun
3744*4882a593Smuzhiyun } else {
3745*4882a593Smuzhiyun /*
3746*4882a593Smuzhiyun * If we overflowed the seq_file before, then we want
3747*4882a593Smuzhiyun * to just reuse the trace_seq buffer again.
3748*4882a593Smuzhiyun */
3749*4882a593Smuzhiyun if (iter->leftover)
3750*4882a593Smuzhiyun p = iter;
3751*4882a593Smuzhiyun else {
3752*4882a593Smuzhiyun l = *pos - 1;
3753*4882a593Smuzhiyun p = s_next(m, p, &l);
3754*4882a593Smuzhiyun }
3755*4882a593Smuzhiyun }
3756*4882a593Smuzhiyun
3757*4882a593Smuzhiyun trace_event_read_lock();
3758*4882a593Smuzhiyun trace_access_lock(cpu_file);
3759*4882a593Smuzhiyun return p;
3760*4882a593Smuzhiyun }
3761*4882a593Smuzhiyun
3762*4882a593Smuzhiyun static void s_stop(struct seq_file *m, void *p)
3763*4882a593Smuzhiyun {
3764*4882a593Smuzhiyun struct trace_iterator *iter = m->private;
3765*4882a593Smuzhiyun
3766*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
3767*4882a593Smuzhiyun if (iter->snapshot && iter->trace->use_max_tr)
3768*4882a593Smuzhiyun return;
3769*4882a593Smuzhiyun #endif
3770*4882a593Smuzhiyun
3771*4882a593Smuzhiyun trace_access_unlock(iter->cpu_file);
3772*4882a593Smuzhiyun trace_event_read_unlock();
3773*4882a593Smuzhiyun }
3774*4882a593Smuzhiyun
3775*4882a593Smuzhiyun static void
3776*4882a593Smuzhiyun get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3777*4882a593Smuzhiyun unsigned long *entries, int cpu)
3778*4882a593Smuzhiyun {
3779*4882a593Smuzhiyun unsigned long count;
3780*4882a593Smuzhiyun
3781*4882a593Smuzhiyun count = ring_buffer_entries_cpu(buf->buffer, cpu);
3782*4882a593Smuzhiyun /*
3783*4882a593Smuzhiyun * If this buffer has skipped entries, then we hold all
3784*4882a593Smuzhiyun * entries for the trace and we need to ignore the
3785*4882a593Smuzhiyun * ones before the time stamp.
3786*4882a593Smuzhiyun */
3787*4882a593Smuzhiyun if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3788*4882a593Smuzhiyun count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3789*4882a593Smuzhiyun /* total is the same as the entries */
3790*4882a593Smuzhiyun *total = count;
3791*4882a593Smuzhiyun } else
3792*4882a593Smuzhiyun *total = count +
3793*4882a593Smuzhiyun ring_buffer_overrun_cpu(buf->buffer, cpu);
3794*4882a593Smuzhiyun *entries = count;
3795*4882a593Smuzhiyun }
3796*4882a593Smuzhiyun
3797*4882a593Smuzhiyun static void
3798*4882a593Smuzhiyun get_total_entries(struct array_buffer *buf,
3799*4882a593Smuzhiyun unsigned long *total, unsigned long *entries)
3800*4882a593Smuzhiyun {
3801*4882a593Smuzhiyun unsigned long t, e;
3802*4882a593Smuzhiyun int cpu;
3803*4882a593Smuzhiyun
3804*4882a593Smuzhiyun *total = 0;
3805*4882a593Smuzhiyun *entries = 0;
3806*4882a593Smuzhiyun
3807*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
3808*4882a593Smuzhiyun get_total_entries_cpu(buf, &t, &e, cpu);
3809*4882a593Smuzhiyun *total += t;
3810*4882a593Smuzhiyun *entries += e;
3811*4882a593Smuzhiyun }
3812*4882a593Smuzhiyun }
3813*4882a593Smuzhiyun
3814*4882a593Smuzhiyun unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3815*4882a593Smuzhiyun {
3816*4882a593Smuzhiyun unsigned long total, entries;
3817*4882a593Smuzhiyun
3818*4882a593Smuzhiyun if (!tr)
3819*4882a593Smuzhiyun tr = &global_trace;
3820*4882a593Smuzhiyun
3821*4882a593Smuzhiyun get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
3822*4882a593Smuzhiyun
3823*4882a593Smuzhiyun return entries;
3824*4882a593Smuzhiyun }
3825*4882a593Smuzhiyun
3826*4882a593Smuzhiyun unsigned long trace_total_entries(struct trace_array *tr)
3827*4882a593Smuzhiyun {
3828*4882a593Smuzhiyun unsigned long total, entries;
3829*4882a593Smuzhiyun
3830*4882a593Smuzhiyun if (!tr)
3831*4882a593Smuzhiyun tr = &global_trace;
3832*4882a593Smuzhiyun
3833*4882a593Smuzhiyun get_total_entries(&tr->array_buffer, &total, &entries);
3834*4882a593Smuzhiyun
3835*4882a593Smuzhiyun return entries;
3836*4882a593Smuzhiyun }
3837*4882a593Smuzhiyun
3838*4882a593Smuzhiyun static void print_lat_help_header(struct seq_file *m)
3839*4882a593Smuzhiyun {
3840*4882a593Smuzhiyun seq_puts(m, "# _------=> CPU# \n"
3841*4882a593Smuzhiyun "# / _-----=> irqs-off \n"
3842*4882a593Smuzhiyun "# | / _----=> need-resched \n"
3843*4882a593Smuzhiyun "# || / _---=> hardirq/softirq \n"
3844*4882a593Smuzhiyun "# ||| / _--=> preempt-depth \n"
3845*4882a593Smuzhiyun "# |||| / delay \n"
3846*4882a593Smuzhiyun "# cmd pid ||||| time | caller \n"
3847*4882a593Smuzhiyun "# \\ / ||||| \\ | / \n");
3848*4882a593Smuzhiyun }
3849*4882a593Smuzhiyun
3850*4882a593Smuzhiyun static void print_event_info(struct array_buffer *buf, struct seq_file *m)
3851*4882a593Smuzhiyun {
3852*4882a593Smuzhiyun unsigned long total;
3853*4882a593Smuzhiyun unsigned long entries;
3854*4882a593Smuzhiyun
3855*4882a593Smuzhiyun get_total_entries(buf, &total, &entries);
3856*4882a593Smuzhiyun seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3857*4882a593Smuzhiyun entries, total, num_online_cpus());
3858*4882a593Smuzhiyun seq_puts(m, "#\n");
3859*4882a593Smuzhiyun }
3860*4882a593Smuzhiyun
3861*4882a593Smuzhiyun static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
3862*4882a593Smuzhiyun unsigned int flags)
3863*4882a593Smuzhiyun {
3864*4882a593Smuzhiyun bool tgid = flags & TRACE_ITER_RECORD_TGID;
3865*4882a593Smuzhiyun
3866*4882a593Smuzhiyun print_event_info(buf, m);
3867*4882a593Smuzhiyun
3868*4882a593Smuzhiyun seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
3869*4882a593Smuzhiyun seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3870*4882a593Smuzhiyun }
3871*4882a593Smuzhiyun
3872*4882a593Smuzhiyun static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
3873*4882a593Smuzhiyun unsigned int flags)
3874*4882a593Smuzhiyun {
3875*4882a593Smuzhiyun bool tgid = flags & TRACE_ITER_RECORD_TGID;
3876*4882a593Smuzhiyun const char *space = " ";
3877*4882a593Smuzhiyun int prec = tgid ? 12 : 2;
3878*4882a593Smuzhiyun
3879*4882a593Smuzhiyun print_event_info(buf, m);
3880*4882a593Smuzhiyun
3881*4882a593Smuzhiyun seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3882*4882a593Smuzhiyun seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3883*4882a593Smuzhiyun seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3884*4882a593Smuzhiyun seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3885*4882a593Smuzhiyun seq_printf(m, "# %.*s||| / delay\n", prec, space);
3886*4882a593Smuzhiyun seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3887*4882a593Smuzhiyun seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3888*4882a593Smuzhiyun }
3889*4882a593Smuzhiyun
3890*4882a593Smuzhiyun void
3891*4882a593Smuzhiyun print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3892*4882a593Smuzhiyun {
3893*4882a593Smuzhiyun unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3894*4882a593Smuzhiyun struct array_buffer *buf = iter->array_buffer;
3895*4882a593Smuzhiyun struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3896*4882a593Smuzhiyun struct tracer *type = iter->trace;
3897*4882a593Smuzhiyun unsigned long entries;
3898*4882a593Smuzhiyun unsigned long total;
3899*4882a593Smuzhiyun const char *name = "preemption";
3900*4882a593Smuzhiyun
3901*4882a593Smuzhiyun name = type->name;
3902*4882a593Smuzhiyun
3903*4882a593Smuzhiyun get_total_entries(buf, &total, &entries);
3904*4882a593Smuzhiyun
3905*4882a593Smuzhiyun seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3906*4882a593Smuzhiyun name, UTS_RELEASE);
3907*4882a593Smuzhiyun seq_puts(m, "# -----------------------------------"
3908*4882a593Smuzhiyun "---------------------------------\n");
3909*4882a593Smuzhiyun seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3910*4882a593Smuzhiyun " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3911*4882a593Smuzhiyun nsecs_to_usecs(data->saved_latency),
3912*4882a593Smuzhiyun entries,
3913*4882a593Smuzhiyun total,
3914*4882a593Smuzhiyun buf->cpu,
3915*4882a593Smuzhiyun #if defined(CONFIG_PREEMPT_NONE)
3916*4882a593Smuzhiyun "server",
3917*4882a593Smuzhiyun #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3918*4882a593Smuzhiyun "desktop",
3919*4882a593Smuzhiyun #elif defined(CONFIG_PREEMPT)
3920*4882a593Smuzhiyun "preempt",
3921*4882a593Smuzhiyun #elif defined(CONFIG_PREEMPT_RT)
3922*4882a593Smuzhiyun "preempt_rt",
3923*4882a593Smuzhiyun #else
3924*4882a593Smuzhiyun "unknown",
3925*4882a593Smuzhiyun #endif
3926*4882a593Smuzhiyun /* These are reserved for later use */
3927*4882a593Smuzhiyun 0, 0, 0, 0);
3928*4882a593Smuzhiyun #ifdef CONFIG_SMP
3929*4882a593Smuzhiyun seq_printf(m, " #P:%d)\n", num_online_cpus());
3930*4882a593Smuzhiyun #else
3931*4882a593Smuzhiyun seq_puts(m, ")\n");
3932*4882a593Smuzhiyun #endif
3933*4882a593Smuzhiyun seq_puts(m, "# -----------------\n");
3934*4882a593Smuzhiyun seq_printf(m, "# | task: %.16s-%d "
3935*4882a593Smuzhiyun "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3936*4882a593Smuzhiyun data->comm, data->pid,
3937*4882a593Smuzhiyun from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3938*4882a593Smuzhiyun data->policy, data->rt_priority);
3939*4882a593Smuzhiyun seq_puts(m, "# -----------------\n");
3940*4882a593Smuzhiyun
3941*4882a593Smuzhiyun if (data->critical_start) {
3942*4882a593Smuzhiyun seq_puts(m, "# => started at: ");
3943*4882a593Smuzhiyun seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3944*4882a593Smuzhiyun trace_print_seq(m, &iter->seq);
3945*4882a593Smuzhiyun seq_puts(m, "\n# => ended at: ");
3946*4882a593Smuzhiyun seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3947*4882a593Smuzhiyun trace_print_seq(m, &iter->seq);
3948*4882a593Smuzhiyun seq_puts(m, "\n#\n");
3949*4882a593Smuzhiyun }
3950*4882a593Smuzhiyun
3951*4882a593Smuzhiyun seq_puts(m, "#\n");
3952*4882a593Smuzhiyun }
3953*4882a593Smuzhiyun
3954*4882a593Smuzhiyun static void test_cpu_buff_start(struct trace_iterator *iter)
3955*4882a593Smuzhiyun {
3956*4882a593Smuzhiyun struct trace_seq *s = &iter->seq;
3957*4882a593Smuzhiyun struct trace_array *tr = iter->tr;
3958*4882a593Smuzhiyun
3959*4882a593Smuzhiyun if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3960*4882a593Smuzhiyun return;
3961*4882a593Smuzhiyun
3962*4882a593Smuzhiyun if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3963*4882a593Smuzhiyun return;
3964*4882a593Smuzhiyun
3965*4882a593Smuzhiyun if (cpumask_available(iter->started) &&
3966*4882a593Smuzhiyun cpumask_test_cpu(iter->cpu, iter->started))
3967*4882a593Smuzhiyun return;
3968*4882a593Smuzhiyun
3969*4882a593Smuzhiyun if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
3970*4882a593Smuzhiyun return;
3971*4882a593Smuzhiyun
3972*4882a593Smuzhiyun if (cpumask_available(iter->started))
3973*4882a593Smuzhiyun cpumask_set_cpu(iter->cpu, iter->started);
3974*4882a593Smuzhiyun
3975*4882a593Smuzhiyun /* Don't print started cpu buffer for the first entry of the trace */
3976*4882a593Smuzhiyun if (iter->idx > 1)
3977*4882a593Smuzhiyun trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3978*4882a593Smuzhiyun iter->cpu);
3979*4882a593Smuzhiyun }
3980*4882a593Smuzhiyun
3981*4882a593Smuzhiyun static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3982*4882a593Smuzhiyun {
3983*4882a593Smuzhiyun struct trace_array *tr = iter->tr;
3984*4882a593Smuzhiyun struct trace_seq *s = &iter->seq;
3985*4882a593Smuzhiyun unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3986*4882a593Smuzhiyun struct trace_entry *entry;
3987*4882a593Smuzhiyun struct trace_event *event;
3988*4882a593Smuzhiyun
3989*4882a593Smuzhiyun entry = iter->ent;
3990*4882a593Smuzhiyun
3991*4882a593Smuzhiyun test_cpu_buff_start(iter);
3992*4882a593Smuzhiyun
3993*4882a593Smuzhiyun event = ftrace_find_event(entry->type);
3994*4882a593Smuzhiyun
3995*4882a593Smuzhiyun if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3996*4882a593Smuzhiyun if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3997*4882a593Smuzhiyun trace_print_lat_context(iter);
3998*4882a593Smuzhiyun else
3999*4882a593Smuzhiyun trace_print_context(iter);
4000*4882a593Smuzhiyun }
4001*4882a593Smuzhiyun
4002*4882a593Smuzhiyun if (trace_seq_has_overflowed(s))
4003*4882a593Smuzhiyun return TRACE_TYPE_PARTIAL_LINE;
4004*4882a593Smuzhiyun
4005*4882a593Smuzhiyun if (event)
4006*4882a593Smuzhiyun return event->funcs->trace(iter, sym_flags, event);
4007*4882a593Smuzhiyun
4008*4882a593Smuzhiyun trace_seq_printf(s, "Unknown type %d\n", entry->type);
4009*4882a593Smuzhiyun
4010*4882a593Smuzhiyun return trace_handle_return(s);
4011*4882a593Smuzhiyun }
4012*4882a593Smuzhiyun
4013*4882a593Smuzhiyun static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4014*4882a593Smuzhiyun {
4015*4882a593Smuzhiyun struct trace_array *tr = iter->tr;
4016*4882a593Smuzhiyun struct trace_seq *s = &iter->seq;
4017*4882a593Smuzhiyun struct trace_entry *entry;
4018*4882a593Smuzhiyun struct trace_event *event;
4019*4882a593Smuzhiyun
4020*4882a593Smuzhiyun entry = iter->ent;
4021*4882a593Smuzhiyun
4022*4882a593Smuzhiyun if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4023*4882a593Smuzhiyun trace_seq_printf(s, "%d %d %llu ",
4024*4882a593Smuzhiyun entry->pid, iter->cpu, iter->ts);
4025*4882a593Smuzhiyun
4026*4882a593Smuzhiyun if (trace_seq_has_overflowed(s))
4027*4882a593Smuzhiyun return TRACE_TYPE_PARTIAL_LINE;
4028*4882a593Smuzhiyun
4029*4882a593Smuzhiyun event = ftrace_find_event(entry->type);
4030*4882a593Smuzhiyun if (event)
4031*4882a593Smuzhiyun return event->funcs->raw(iter, 0, event);
4032*4882a593Smuzhiyun
4033*4882a593Smuzhiyun trace_seq_printf(s, "%d ?\n", entry->type);
4034*4882a593Smuzhiyun
4035*4882a593Smuzhiyun return trace_handle_return(s);
4036*4882a593Smuzhiyun }
4037*4882a593Smuzhiyun
4038*4882a593Smuzhiyun static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4039*4882a593Smuzhiyun {
4040*4882a593Smuzhiyun struct trace_array *tr = iter->tr;
4041*4882a593Smuzhiyun struct trace_seq *s = &iter->seq;
4042*4882a593Smuzhiyun unsigned char newline = '\n';
4043*4882a593Smuzhiyun struct trace_entry *entry;
4044*4882a593Smuzhiyun struct trace_event *event;
4045*4882a593Smuzhiyun
4046*4882a593Smuzhiyun entry = iter->ent;
4047*4882a593Smuzhiyun
4048*4882a593Smuzhiyun if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4049*4882a593Smuzhiyun SEQ_PUT_HEX_FIELD(s, entry->pid);
4050*4882a593Smuzhiyun SEQ_PUT_HEX_FIELD(s, iter->cpu);
4051*4882a593Smuzhiyun SEQ_PUT_HEX_FIELD(s, iter->ts);
4052*4882a593Smuzhiyun if (trace_seq_has_overflowed(s))
4053*4882a593Smuzhiyun return TRACE_TYPE_PARTIAL_LINE;
4054*4882a593Smuzhiyun }
4055*4882a593Smuzhiyun
4056*4882a593Smuzhiyun event = ftrace_find_event(entry->type);
4057*4882a593Smuzhiyun if (event) {
4058*4882a593Smuzhiyun enum print_line_t ret = event->funcs->hex(iter, 0, event);
4059*4882a593Smuzhiyun if (ret != TRACE_TYPE_HANDLED)
4060*4882a593Smuzhiyun return ret;
4061*4882a593Smuzhiyun }
4062*4882a593Smuzhiyun
4063*4882a593Smuzhiyun SEQ_PUT_FIELD(s, newline);
4064*4882a593Smuzhiyun
4065*4882a593Smuzhiyun return trace_handle_return(s);
4066*4882a593Smuzhiyun }
4067*4882a593Smuzhiyun
4068*4882a593Smuzhiyun static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4069*4882a593Smuzhiyun {
4070*4882a593Smuzhiyun struct trace_array *tr = iter->tr;
4071*4882a593Smuzhiyun struct trace_seq *s = &iter->seq;
4072*4882a593Smuzhiyun struct trace_entry *entry;
4073*4882a593Smuzhiyun struct trace_event *event;
4074*4882a593Smuzhiyun
4075*4882a593Smuzhiyun entry = iter->ent;
4076*4882a593Smuzhiyun
4077*4882a593Smuzhiyun if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4078*4882a593Smuzhiyun SEQ_PUT_FIELD(s, entry->pid);
4079*4882a593Smuzhiyun SEQ_PUT_FIELD(s, iter->cpu);
4080*4882a593Smuzhiyun SEQ_PUT_FIELD(s, iter->ts);
4081*4882a593Smuzhiyun if (trace_seq_has_overflowed(s))
4082*4882a593Smuzhiyun return TRACE_TYPE_PARTIAL_LINE;
4083*4882a593Smuzhiyun }
4084*4882a593Smuzhiyun
4085*4882a593Smuzhiyun event = ftrace_find_event(entry->type);
4086*4882a593Smuzhiyun return event ? event->funcs->binary(iter, 0, event) :
4087*4882a593Smuzhiyun TRACE_TYPE_HANDLED;
4088*4882a593Smuzhiyun }
4089*4882a593Smuzhiyun
4090*4882a593Smuzhiyun int trace_empty(struct trace_iterator *iter)
4091*4882a593Smuzhiyun {
4092*4882a593Smuzhiyun struct ring_buffer_iter *buf_iter;
4093*4882a593Smuzhiyun int cpu;
4094*4882a593Smuzhiyun
4095*4882a593Smuzhiyun /* If we are looking at one CPU buffer, only check that one */
4096*4882a593Smuzhiyun if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4097*4882a593Smuzhiyun cpu = iter->cpu_file;
4098*4882a593Smuzhiyun buf_iter = trace_buffer_iter(iter, cpu);
4099*4882a593Smuzhiyun if (buf_iter) {
4100*4882a593Smuzhiyun if (!ring_buffer_iter_empty(buf_iter))
4101*4882a593Smuzhiyun return 0;
4102*4882a593Smuzhiyun } else {
4103*4882a593Smuzhiyun if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4104*4882a593Smuzhiyun return 0;
4105*4882a593Smuzhiyun }
4106*4882a593Smuzhiyun return 1;
4107*4882a593Smuzhiyun }
4108*4882a593Smuzhiyun
4109*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
4110*4882a593Smuzhiyun buf_iter = trace_buffer_iter(iter, cpu);
4111*4882a593Smuzhiyun if (buf_iter) {
4112*4882a593Smuzhiyun if (!ring_buffer_iter_empty(buf_iter))
4113*4882a593Smuzhiyun return 0;
4114*4882a593Smuzhiyun } else {
4115*4882a593Smuzhiyun if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4116*4882a593Smuzhiyun return 0;
4117*4882a593Smuzhiyun }
4118*4882a593Smuzhiyun }
4119*4882a593Smuzhiyun
4120*4882a593Smuzhiyun return 1;
4121*4882a593Smuzhiyun }
4122*4882a593Smuzhiyun
4123*4882a593Smuzhiyun /* Called with trace_event_read_lock() held. */
4124*4882a593Smuzhiyun enum print_line_t print_trace_line(struct trace_iterator *iter)
4125*4882a593Smuzhiyun {
4126*4882a593Smuzhiyun struct trace_array *tr = iter->tr;
4127*4882a593Smuzhiyun unsigned long trace_flags = tr->trace_flags;
4128*4882a593Smuzhiyun enum print_line_t ret;
4129*4882a593Smuzhiyun
4130*4882a593Smuzhiyun if (iter->lost_events) {
4131*4882a593Smuzhiyun if (iter->lost_events == (unsigned long)-1)
4132*4882a593Smuzhiyun trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4133*4882a593Smuzhiyun iter->cpu);
4134*4882a593Smuzhiyun else
4135*4882a593Smuzhiyun trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4136*4882a593Smuzhiyun iter->cpu, iter->lost_events);
4137*4882a593Smuzhiyun if (trace_seq_has_overflowed(&iter->seq))
4138*4882a593Smuzhiyun return TRACE_TYPE_PARTIAL_LINE;
4139*4882a593Smuzhiyun }
4140*4882a593Smuzhiyun
4141*4882a593Smuzhiyun if (iter->trace && iter->trace->print_line) {
4142*4882a593Smuzhiyun ret = iter->trace->print_line(iter);
4143*4882a593Smuzhiyun if (ret != TRACE_TYPE_UNHANDLED)
4144*4882a593Smuzhiyun return ret;
4145*4882a593Smuzhiyun }
4146*4882a593Smuzhiyun
4147*4882a593Smuzhiyun if (iter->ent->type == TRACE_BPUTS &&
4148*4882a593Smuzhiyun trace_flags & TRACE_ITER_PRINTK &&
4149*4882a593Smuzhiyun trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4150*4882a593Smuzhiyun return trace_print_bputs_msg_only(iter);
4151*4882a593Smuzhiyun
4152*4882a593Smuzhiyun if (iter->ent->type == TRACE_BPRINT &&
4153*4882a593Smuzhiyun trace_flags & TRACE_ITER_PRINTK &&
4154*4882a593Smuzhiyun trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4155*4882a593Smuzhiyun return trace_print_bprintk_msg_only(iter);
4156*4882a593Smuzhiyun
4157*4882a593Smuzhiyun if (iter->ent->type == TRACE_PRINT &&
4158*4882a593Smuzhiyun trace_flags & TRACE_ITER_PRINTK &&
4159*4882a593Smuzhiyun trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4160*4882a593Smuzhiyun return trace_print_printk_msg_only(iter);
4161*4882a593Smuzhiyun
4162*4882a593Smuzhiyun if (trace_flags & TRACE_ITER_BIN)
4163*4882a593Smuzhiyun return print_bin_fmt(iter);
4164*4882a593Smuzhiyun
4165*4882a593Smuzhiyun if (trace_flags & TRACE_ITER_HEX)
4166*4882a593Smuzhiyun return print_hex_fmt(iter);
4167*4882a593Smuzhiyun
4168*4882a593Smuzhiyun if (trace_flags & TRACE_ITER_RAW)
4169*4882a593Smuzhiyun return print_raw_fmt(iter);
4170*4882a593Smuzhiyun
4171*4882a593Smuzhiyun return print_trace_fmt(iter);
4172*4882a593Smuzhiyun }
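/*
 * Usage note (added commentary, not from the original source): the
 * TRACE_ITER_BIN, TRACE_ITER_HEX and TRACE_ITER_RAW checks above
 * correspond to the "bin", "hex" and "raw" entries in the tracefs
 * trace_options file. For example, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *	echo hex > /sys/kernel/tracing/trace_options
 *	cat /sys/kernel/tracing/trace
 *
 * routes output through print_hex_fmt(), and "echo nohex > trace_options"
 * restores the default print_trace_fmt() path.
 */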
4173*4882a593Smuzhiyun
4174*4882a593Smuzhiyun void trace_latency_header(struct seq_file *m)
4175*4882a593Smuzhiyun {
4176*4882a593Smuzhiyun struct trace_iterator *iter = m->private;
4177*4882a593Smuzhiyun struct trace_array *tr = iter->tr;
4178*4882a593Smuzhiyun
4179*4882a593Smuzhiyun /* print nothing if the buffers are empty */
4180*4882a593Smuzhiyun if (trace_empty(iter))
4181*4882a593Smuzhiyun return;
4182*4882a593Smuzhiyun
4183*4882a593Smuzhiyun if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4184*4882a593Smuzhiyun print_trace_header(m, iter);
4185*4882a593Smuzhiyun
4186*4882a593Smuzhiyun if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4187*4882a593Smuzhiyun print_lat_help_header(m);
4188*4882a593Smuzhiyun }
4189*4882a593Smuzhiyun
4190*4882a593Smuzhiyun void trace_default_header(struct seq_file *m)
4191*4882a593Smuzhiyun {
4192*4882a593Smuzhiyun struct trace_iterator *iter = m->private;
4193*4882a593Smuzhiyun struct trace_array *tr = iter->tr;
4194*4882a593Smuzhiyun unsigned long trace_flags = tr->trace_flags;
4195*4882a593Smuzhiyun
4196*4882a593Smuzhiyun if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4197*4882a593Smuzhiyun return;
4198*4882a593Smuzhiyun
4199*4882a593Smuzhiyun if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4200*4882a593Smuzhiyun /* print nothing if the buffers are empty */
4201*4882a593Smuzhiyun if (trace_empty(iter))
4202*4882a593Smuzhiyun return;
4203*4882a593Smuzhiyun print_trace_header(m, iter);
4204*4882a593Smuzhiyun if (!(trace_flags & TRACE_ITER_VERBOSE))
4205*4882a593Smuzhiyun print_lat_help_header(m);
4206*4882a593Smuzhiyun } else {
4207*4882a593Smuzhiyun if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4208*4882a593Smuzhiyun if (trace_flags & TRACE_ITER_IRQ_INFO)
4209*4882a593Smuzhiyun print_func_help_header_irq(iter->array_buffer,
4210*4882a593Smuzhiyun m, trace_flags);
4211*4882a593Smuzhiyun else
4212*4882a593Smuzhiyun print_func_help_header(iter->array_buffer, m,
4213*4882a593Smuzhiyun trace_flags);
4214*4882a593Smuzhiyun }
4215*4882a593Smuzhiyun }
4216*4882a593Smuzhiyun }
4217*4882a593Smuzhiyun
4218*4882a593Smuzhiyun static void test_ftrace_alive(struct seq_file *m)
4219*4882a593Smuzhiyun {
4220*4882a593Smuzhiyun if (!ftrace_is_dead())
4221*4882a593Smuzhiyun return;
4222*4882a593Smuzhiyun seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4223*4882a593Smuzhiyun "# MAY BE MISSING FUNCTION EVENTS\n");
4224*4882a593Smuzhiyun }
4225*4882a593Smuzhiyun
4226*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
4227*4882a593Smuzhiyun static void show_snapshot_main_help(struct seq_file *m)
4228*4882a593Smuzhiyun {
4229*4882a593Smuzhiyun seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4230*4882a593Smuzhiyun "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4231*4882a593Smuzhiyun "# Takes a snapshot of the main buffer.\n"
4232*4882a593Smuzhiyun "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4233*4882a593Smuzhiyun "# (Doesn't have to be '2' works with any number that\n"
4234*4882a593Smuzhiyun "# is not a '0' or '1')\n");
4235*4882a593Smuzhiyun }
4236*4882a593Smuzhiyun
4237*4882a593Smuzhiyun static void show_snapshot_percpu_help(struct seq_file *m)
4238*4882a593Smuzhiyun {
4239*4882a593Smuzhiyun seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4240*4882a593Smuzhiyun #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4241*4882a593Smuzhiyun seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4242*4882a593Smuzhiyun "# Takes a snapshot of the main buffer for this cpu.\n");
4243*4882a593Smuzhiyun #else
4244*4882a593Smuzhiyun seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4245*4882a593Smuzhiyun "# Must use main snapshot file to allocate.\n");
4246*4882a593Smuzhiyun #endif
4247*4882a593Smuzhiyun seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4248*4882a593Smuzhiyun "# (Doesn't have to be '2' works with any number that\n"
4249*4882a593Smuzhiyun "# is not a '0' or '1')\n");
4250*4882a593Smuzhiyun }
4251*4882a593Smuzhiyun
4252*4882a593Smuzhiyun static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4253*4882a593Smuzhiyun {
4254*4882a593Smuzhiyun if (iter->tr->allocated_snapshot)
4255*4882a593Smuzhiyun seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4256*4882a593Smuzhiyun else
4257*4882a593Smuzhiyun seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4258*4882a593Smuzhiyun
4259*4882a593Smuzhiyun seq_puts(m, "# Snapshot commands:\n");
4260*4882a593Smuzhiyun if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4261*4882a593Smuzhiyun show_snapshot_main_help(m);
4262*4882a593Smuzhiyun else
4263*4882a593Smuzhiyun show_snapshot_percpu_help(m);
4264*4882a593Smuzhiyun }
4265*4882a593Smuzhiyun #else
4266*4882a593Smuzhiyun /* Should never be called */
4267*4882a593Smuzhiyun static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4268*4882a593Smuzhiyun #endif
4269*4882a593Smuzhiyun
4270*4882a593Smuzhiyun static int s_show(struct seq_file *m, void *v)
4271*4882a593Smuzhiyun {
4272*4882a593Smuzhiyun struct trace_iterator *iter = v;
4273*4882a593Smuzhiyun int ret;
4274*4882a593Smuzhiyun
4275*4882a593Smuzhiyun if (iter->ent == NULL) {
4276*4882a593Smuzhiyun if (iter->tr) {
4277*4882a593Smuzhiyun seq_printf(m, "# tracer: %s\n", iter->trace->name);
4278*4882a593Smuzhiyun seq_puts(m, "#\n");
4279*4882a593Smuzhiyun test_ftrace_alive(m);
4280*4882a593Smuzhiyun }
4281*4882a593Smuzhiyun if (iter->snapshot && trace_empty(iter))
4282*4882a593Smuzhiyun print_snapshot_help(m, iter);
4283*4882a593Smuzhiyun else if (iter->trace && iter->trace->print_header)
4284*4882a593Smuzhiyun iter->trace->print_header(m);
4285*4882a593Smuzhiyun else
4286*4882a593Smuzhiyun trace_default_header(m);
4287*4882a593Smuzhiyun
4288*4882a593Smuzhiyun } else if (iter->leftover) {
4289*4882a593Smuzhiyun /*
4290*4882a593Smuzhiyun * If we filled the seq_file buffer earlier, we
4291*4882a593Smuzhiyun * want to just show it now.
4292*4882a593Smuzhiyun */
4293*4882a593Smuzhiyun ret = trace_print_seq(m, &iter->seq);
4294*4882a593Smuzhiyun
4295*4882a593Smuzhiyun /* ret should this time be zero, but you never know */
4296*4882a593Smuzhiyun iter->leftover = ret;
4297*4882a593Smuzhiyun
4298*4882a593Smuzhiyun } else {
4299*4882a593Smuzhiyun print_trace_line(iter);
4300*4882a593Smuzhiyun ret = trace_print_seq(m, &iter->seq);
4301*4882a593Smuzhiyun /*
4302*4882a593Smuzhiyun * If we overflow the seq_file buffer, then it will
4303*4882a593Smuzhiyun * ask us for this data again at start up.
4304*4882a593Smuzhiyun * Use that instead.
4305*4882a593Smuzhiyun * ret is 0 if seq_file write succeeded.
4306*4882a593Smuzhiyun * -1 otherwise.
4307*4882a593Smuzhiyun */
4308*4882a593Smuzhiyun iter->leftover = ret;
4309*4882a593Smuzhiyun }
4310*4882a593Smuzhiyun
4311*4882a593Smuzhiyun return 0;
4312*4882a593Smuzhiyun }
4313*4882a593Smuzhiyun
4314*4882a593Smuzhiyun /*
4315*4882a593Smuzhiyun * Should be used after trace_array_get(), trace_types_lock
4316*4882a593Smuzhiyun * ensures that i_cdev was already initialized.
4317*4882a593Smuzhiyun */
4318*4882a593Smuzhiyun static inline int tracing_get_cpu(struct inode *inode)
4319*4882a593Smuzhiyun {
4320*4882a593Smuzhiyun if (inode->i_cdev) /* See trace_create_cpu_file() */
4321*4882a593Smuzhiyun return (long)inode->i_cdev - 1;
4322*4882a593Smuzhiyun return RING_BUFFER_ALL_CPUS;
4323*4882a593Smuzhiyun }
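/*
 * Sketch of the counterpart encoding (simplified, added here only for
 * illustration; see the real trace_create_cpu_file() elsewhere in this
 * file): the per_cpu files store cpu + 1 in i_cdev when they are
 * created, roughly
 *
 *	d_inode(dentry)->i_cdev = (void *)(cpu + 1);
 *
 * so a NULL i_cdev decodes to RING_BUFFER_ALL_CPUS above and any other
 * value decodes back to the cpu number.
 */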
4324*4882a593Smuzhiyun
4325*4882a593Smuzhiyun static const struct seq_operations tracer_seq_ops = {
4326*4882a593Smuzhiyun .start = s_start,
4327*4882a593Smuzhiyun .next = s_next,
4328*4882a593Smuzhiyun .stop = s_stop,
4329*4882a593Smuzhiyun .show = s_show,
4330*4882a593Smuzhiyun };
4331*4882a593Smuzhiyun
4332*4882a593Smuzhiyun static struct trace_iterator *
4333*4882a593Smuzhiyun __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4334*4882a593Smuzhiyun {
4335*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
4336*4882a593Smuzhiyun struct trace_iterator *iter;
4337*4882a593Smuzhiyun int cpu;
4338*4882a593Smuzhiyun
4339*4882a593Smuzhiyun if (tracing_disabled)
4340*4882a593Smuzhiyun return ERR_PTR(-ENODEV);
4341*4882a593Smuzhiyun
4342*4882a593Smuzhiyun iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4343*4882a593Smuzhiyun if (!iter)
4344*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
4345*4882a593Smuzhiyun
4346*4882a593Smuzhiyun iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4347*4882a593Smuzhiyun GFP_KERNEL);
4348*4882a593Smuzhiyun if (!iter->buffer_iter)
4349*4882a593Smuzhiyun goto release;
4350*4882a593Smuzhiyun
4351*4882a593Smuzhiyun /*
4352*4882a593Smuzhiyun * trace_find_next_entry() may need to save off iter->ent.
4353*4882a593Smuzhiyun * It will place it into the iter->temp buffer. As most
4354*4882a593Smuzhiyun * events are less than 128, allocate a buffer of that size.
4355*4882a593Smuzhiyun * If one is greater, then trace_find_next_entry() will
4356*4882a593Smuzhiyun * allocate a new buffer to adjust for the bigger iter->ent.
4357*4882a593Smuzhiyun * It's not critical if it fails to get allocated here.
4358*4882a593Smuzhiyun */
4359*4882a593Smuzhiyun iter->temp = kmalloc(128, GFP_KERNEL);
4360*4882a593Smuzhiyun if (iter->temp)
4361*4882a593Smuzhiyun iter->temp_size = 128;
4362*4882a593Smuzhiyun
4363*4882a593Smuzhiyun /*
4364*4882a593Smuzhiyun * We make a copy of the current tracer to avoid concurrent
4365*4882a593Smuzhiyun * changes on it while we are reading.
4366*4882a593Smuzhiyun */
4367*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
4368*4882a593Smuzhiyun iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4369*4882a593Smuzhiyun if (!iter->trace)
4370*4882a593Smuzhiyun goto fail;
4371*4882a593Smuzhiyun
4372*4882a593Smuzhiyun *iter->trace = *tr->current_trace;
4373*4882a593Smuzhiyun
4374*4882a593Smuzhiyun if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4375*4882a593Smuzhiyun goto fail;
4376*4882a593Smuzhiyun
4377*4882a593Smuzhiyun iter->tr = tr;
4378*4882a593Smuzhiyun
4379*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
4380*4882a593Smuzhiyun /* Currently only the top directory has a snapshot */
4381*4882a593Smuzhiyun if (tr->current_trace->print_max || snapshot)
4382*4882a593Smuzhiyun iter->array_buffer = &tr->max_buffer;
4383*4882a593Smuzhiyun else
4384*4882a593Smuzhiyun #endif
4385*4882a593Smuzhiyun iter->array_buffer = &tr->array_buffer;
4386*4882a593Smuzhiyun iter->snapshot = snapshot;
4387*4882a593Smuzhiyun iter->pos = -1;
4388*4882a593Smuzhiyun iter->cpu_file = tracing_get_cpu(inode);
4389*4882a593Smuzhiyun mutex_init(&iter->mutex);
4390*4882a593Smuzhiyun
4391*4882a593Smuzhiyun /* Notify the tracer early; before we stop tracing. */
4392*4882a593Smuzhiyun if (iter->trace->open)
4393*4882a593Smuzhiyun iter->trace->open(iter);
4394*4882a593Smuzhiyun
4395*4882a593Smuzhiyun /* Annotate start of buffers if we had overruns */
4396*4882a593Smuzhiyun if (ring_buffer_overruns(iter->array_buffer->buffer))
4397*4882a593Smuzhiyun iter->iter_flags |= TRACE_FILE_ANNOTATE;
4398*4882a593Smuzhiyun
4399*4882a593Smuzhiyun /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4400*4882a593Smuzhiyun if (trace_clocks[tr->clock_id].in_ns)
4401*4882a593Smuzhiyun iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4402*4882a593Smuzhiyun
4403*4882a593Smuzhiyun /*
4404*4882a593Smuzhiyun * If pause-on-trace is enabled, then stop the trace while
4405*4882a593Smuzhiyun * dumping, unless this is the "snapshot" file.
4406*4882a593Smuzhiyun */
4407*4882a593Smuzhiyun if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4408*4882a593Smuzhiyun tracing_stop_tr(tr);
4409*4882a593Smuzhiyun
4410*4882a593Smuzhiyun if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4411*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
4412*4882a593Smuzhiyun iter->buffer_iter[cpu] =
4413*4882a593Smuzhiyun ring_buffer_read_prepare(iter->array_buffer->buffer,
4414*4882a593Smuzhiyun cpu, GFP_KERNEL);
4415*4882a593Smuzhiyun }
4416*4882a593Smuzhiyun ring_buffer_read_prepare_sync();
4417*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
4418*4882a593Smuzhiyun ring_buffer_read_start(iter->buffer_iter[cpu]);
4419*4882a593Smuzhiyun tracing_iter_reset(iter, cpu);
4420*4882a593Smuzhiyun }
4421*4882a593Smuzhiyun } else {
4422*4882a593Smuzhiyun cpu = iter->cpu_file;
4423*4882a593Smuzhiyun iter->buffer_iter[cpu] =
4424*4882a593Smuzhiyun ring_buffer_read_prepare(iter->array_buffer->buffer,
4425*4882a593Smuzhiyun cpu, GFP_KERNEL);
4426*4882a593Smuzhiyun ring_buffer_read_prepare_sync();
4427*4882a593Smuzhiyun ring_buffer_read_start(iter->buffer_iter[cpu]);
4428*4882a593Smuzhiyun tracing_iter_reset(iter, cpu);
4429*4882a593Smuzhiyun }
4430*4882a593Smuzhiyun
4431*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
4432*4882a593Smuzhiyun
4433*4882a593Smuzhiyun return iter;
4434*4882a593Smuzhiyun
4435*4882a593Smuzhiyun fail:
4436*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
4437*4882a593Smuzhiyun kfree(iter->trace);
4438*4882a593Smuzhiyun kfree(iter->temp);
4439*4882a593Smuzhiyun kfree(iter->buffer_iter);
4440*4882a593Smuzhiyun release:
4441*4882a593Smuzhiyun seq_release_private(inode, file);
4442*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
4443*4882a593Smuzhiyun }
4444*4882a593Smuzhiyun
4445*4882a593Smuzhiyun int tracing_open_generic(struct inode *inode, struct file *filp)
4446*4882a593Smuzhiyun {
4447*4882a593Smuzhiyun int ret;
4448*4882a593Smuzhiyun
4449*4882a593Smuzhiyun ret = tracing_check_open_get_tr(NULL);
4450*4882a593Smuzhiyun if (ret)
4451*4882a593Smuzhiyun return ret;
4452*4882a593Smuzhiyun
4453*4882a593Smuzhiyun filp->private_data = inode->i_private;
4454*4882a593Smuzhiyun return 0;
4455*4882a593Smuzhiyun }
4456*4882a593Smuzhiyun
4457*4882a593Smuzhiyun bool tracing_is_disabled(void)
4458*4882a593Smuzhiyun {
4459*4882a593Smuzhiyun return (tracing_disabled) ? true : false;
4460*4882a593Smuzhiyun }
4461*4882a593Smuzhiyun
4462*4882a593Smuzhiyun /*
4463*4882a593Smuzhiyun * Open and update trace_array ref count.
4464*4882a593Smuzhiyun * Must have the current trace_array passed to it.
4465*4882a593Smuzhiyun */
4466*4882a593Smuzhiyun int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4467*4882a593Smuzhiyun {
4468*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
4469*4882a593Smuzhiyun int ret;
4470*4882a593Smuzhiyun
4471*4882a593Smuzhiyun ret = tracing_check_open_get_tr(tr);
4472*4882a593Smuzhiyun if (ret)
4473*4882a593Smuzhiyun return ret;
4474*4882a593Smuzhiyun
4475*4882a593Smuzhiyun filp->private_data = inode->i_private;
4476*4882a593Smuzhiyun
4477*4882a593Smuzhiyun return 0;
4478*4882a593Smuzhiyun }
4479*4882a593Smuzhiyun
4480*4882a593Smuzhiyun static int tracing_release(struct inode *inode, struct file *file)
4481*4882a593Smuzhiyun {
4482*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
4483*4882a593Smuzhiyun struct seq_file *m = file->private_data;
4484*4882a593Smuzhiyun struct trace_iterator *iter;
4485*4882a593Smuzhiyun int cpu;
4486*4882a593Smuzhiyun
4487*4882a593Smuzhiyun if (!(file->f_mode & FMODE_READ)) {
4488*4882a593Smuzhiyun trace_array_put(tr);
4489*4882a593Smuzhiyun return 0;
4490*4882a593Smuzhiyun }
4491*4882a593Smuzhiyun
4492*4882a593Smuzhiyun /* Writes do not use seq_file */
4493*4882a593Smuzhiyun iter = m->private;
4494*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
4495*4882a593Smuzhiyun
4496*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
4497*4882a593Smuzhiyun if (iter->buffer_iter[cpu])
4498*4882a593Smuzhiyun ring_buffer_read_finish(iter->buffer_iter[cpu]);
4499*4882a593Smuzhiyun }
4500*4882a593Smuzhiyun
4501*4882a593Smuzhiyun if (iter->trace && iter->trace->close)
4502*4882a593Smuzhiyun iter->trace->close(iter);
4503*4882a593Smuzhiyun
4504*4882a593Smuzhiyun if (!iter->snapshot && tr->stop_count)
4505*4882a593Smuzhiyun /* reenable tracing if it was previously enabled */
4506*4882a593Smuzhiyun tracing_start_tr(tr);
4507*4882a593Smuzhiyun
4508*4882a593Smuzhiyun __trace_array_put(tr);
4509*4882a593Smuzhiyun
4510*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
4511*4882a593Smuzhiyun
4512*4882a593Smuzhiyun mutex_destroy(&iter->mutex);
4513*4882a593Smuzhiyun free_cpumask_var(iter->started);
4514*4882a593Smuzhiyun kfree(iter->temp);
4515*4882a593Smuzhiyun kfree(iter->trace);
4516*4882a593Smuzhiyun kfree(iter->buffer_iter);
4517*4882a593Smuzhiyun seq_release_private(inode, file);
4518*4882a593Smuzhiyun
4519*4882a593Smuzhiyun return 0;
4520*4882a593Smuzhiyun }
4521*4882a593Smuzhiyun
4522*4882a593Smuzhiyun static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4523*4882a593Smuzhiyun {
4524*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
4525*4882a593Smuzhiyun
4526*4882a593Smuzhiyun trace_array_put(tr);
4527*4882a593Smuzhiyun return 0;
4528*4882a593Smuzhiyun }
4529*4882a593Smuzhiyun
4530*4882a593Smuzhiyun static int tracing_single_release_tr(struct inode *inode, struct file *file)
4531*4882a593Smuzhiyun {
4532*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
4533*4882a593Smuzhiyun
4534*4882a593Smuzhiyun trace_array_put(tr);
4535*4882a593Smuzhiyun
4536*4882a593Smuzhiyun return single_release(inode, file);
4537*4882a593Smuzhiyun }
4538*4882a593Smuzhiyun
4539*4882a593Smuzhiyun static int tracing_open(struct inode *inode, struct file *file)
4540*4882a593Smuzhiyun {
4541*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
4542*4882a593Smuzhiyun struct trace_iterator *iter;
4543*4882a593Smuzhiyun int ret;
4544*4882a593Smuzhiyun
4545*4882a593Smuzhiyun ret = tracing_check_open_get_tr(tr);
4546*4882a593Smuzhiyun if (ret)
4547*4882a593Smuzhiyun return ret;
4548*4882a593Smuzhiyun
4549*4882a593Smuzhiyun /* If this file was open for write, then erase contents */
4550*4882a593Smuzhiyun if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4551*4882a593Smuzhiyun int cpu = tracing_get_cpu(inode);
4552*4882a593Smuzhiyun struct array_buffer *trace_buf = &tr->array_buffer;
4553*4882a593Smuzhiyun
4554*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
4555*4882a593Smuzhiyun if (tr->current_trace->print_max)
4556*4882a593Smuzhiyun trace_buf = &tr->max_buffer;
4557*4882a593Smuzhiyun #endif
4558*4882a593Smuzhiyun
4559*4882a593Smuzhiyun if (cpu == RING_BUFFER_ALL_CPUS)
4560*4882a593Smuzhiyun tracing_reset_online_cpus(trace_buf);
4561*4882a593Smuzhiyun else
4562*4882a593Smuzhiyun tracing_reset_cpu(trace_buf, cpu);
4563*4882a593Smuzhiyun }
4564*4882a593Smuzhiyun
4565*4882a593Smuzhiyun if (file->f_mode & FMODE_READ) {
4566*4882a593Smuzhiyun iter = __tracing_open(inode, file, false);
4567*4882a593Smuzhiyun if (IS_ERR(iter))
4568*4882a593Smuzhiyun ret = PTR_ERR(iter);
4569*4882a593Smuzhiyun else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4570*4882a593Smuzhiyun iter->iter_flags |= TRACE_FILE_LAT_FMT;
4571*4882a593Smuzhiyun }
4572*4882a593Smuzhiyun
4573*4882a593Smuzhiyun if (ret < 0)
4574*4882a593Smuzhiyun trace_array_put(tr);
4575*4882a593Smuzhiyun
4576*4882a593Smuzhiyun return ret;
4577*4882a593Smuzhiyun }
4578*4882a593Smuzhiyun
4579*4882a593Smuzhiyun /*
4580*4882a593Smuzhiyun * Some tracers are not suitable for instance buffers.
4581*4882a593Smuzhiyun * A tracer is always available for the global array (toplevel),
4582*4882a593Smuzhiyun * or for an instance if it explicitly states that it is.
4583*4882a593Smuzhiyun */
4584*4882a593Smuzhiyun static bool
4585*4882a593Smuzhiyun trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4586*4882a593Smuzhiyun {
4587*4882a593Smuzhiyun return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4588*4882a593Smuzhiyun }
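/*
 * Example (hypothetical tracer, shown only for illustration): a tracer
 * opts in to instance buffers by setting .allow_instances in its
 * struct tracer definition, e.g.
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name		 = "my_tracer",
 *		.allow_instances = true,
 *		...
 *	};
 */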
4589*4882a593Smuzhiyun
4590*4882a593Smuzhiyun /* Find the next tracer that this trace array may use */
4591*4882a593Smuzhiyun static struct tracer *
4592*4882a593Smuzhiyun get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4593*4882a593Smuzhiyun {
4594*4882a593Smuzhiyun while (t && !trace_ok_for_array(t, tr))
4595*4882a593Smuzhiyun t = t->next;
4596*4882a593Smuzhiyun
4597*4882a593Smuzhiyun return t;
4598*4882a593Smuzhiyun }
4599*4882a593Smuzhiyun
4600*4882a593Smuzhiyun static void *
4601*4882a593Smuzhiyun t_next(struct seq_file *m, void *v, loff_t *pos)
4602*4882a593Smuzhiyun {
4603*4882a593Smuzhiyun struct trace_array *tr = m->private;
4604*4882a593Smuzhiyun struct tracer *t = v;
4605*4882a593Smuzhiyun
4606*4882a593Smuzhiyun (*pos)++;
4607*4882a593Smuzhiyun
4608*4882a593Smuzhiyun if (t)
4609*4882a593Smuzhiyun t = get_tracer_for_array(tr, t->next);
4610*4882a593Smuzhiyun
4611*4882a593Smuzhiyun return t;
4612*4882a593Smuzhiyun }
4613*4882a593Smuzhiyun
4614*4882a593Smuzhiyun static void *t_start(struct seq_file *m, loff_t *pos)
4615*4882a593Smuzhiyun {
4616*4882a593Smuzhiyun struct trace_array *tr = m->private;
4617*4882a593Smuzhiyun struct tracer *t;
4618*4882a593Smuzhiyun loff_t l = 0;
4619*4882a593Smuzhiyun
4620*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
4621*4882a593Smuzhiyun
4622*4882a593Smuzhiyun t = get_tracer_for_array(tr, trace_types);
4623*4882a593Smuzhiyun for (; t && l < *pos; t = t_next(m, t, &l))
4624*4882a593Smuzhiyun ;
4625*4882a593Smuzhiyun
4626*4882a593Smuzhiyun return t;
4627*4882a593Smuzhiyun }
4628*4882a593Smuzhiyun
4629*4882a593Smuzhiyun static void t_stop(struct seq_file *m, void *p)
4630*4882a593Smuzhiyun {
4631*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
4632*4882a593Smuzhiyun }
4633*4882a593Smuzhiyun
4634*4882a593Smuzhiyun static int t_show(struct seq_file *m, void *v)
4635*4882a593Smuzhiyun {
4636*4882a593Smuzhiyun struct tracer *t = v;
4637*4882a593Smuzhiyun
4638*4882a593Smuzhiyun if (!t)
4639*4882a593Smuzhiyun return 0;
4640*4882a593Smuzhiyun
4641*4882a593Smuzhiyun seq_puts(m, t->name);
4642*4882a593Smuzhiyun if (t->next)
4643*4882a593Smuzhiyun seq_putc(m, ' ');
4644*4882a593Smuzhiyun else
4645*4882a593Smuzhiyun seq_putc(m, '\n');
4646*4882a593Smuzhiyun
4647*4882a593Smuzhiyun return 0;
4648*4882a593Smuzhiyun }
4649*4882a593Smuzhiyun
4650*4882a593Smuzhiyun static const struct seq_operations show_traces_seq_ops = {
4651*4882a593Smuzhiyun .start = t_start,
4652*4882a593Smuzhiyun .next = t_next,
4653*4882a593Smuzhiyun .stop = t_stop,
4654*4882a593Smuzhiyun .show = t_show,
4655*4882a593Smuzhiyun };
4656*4882a593Smuzhiyun
4657*4882a593Smuzhiyun static int show_traces_open(struct inode *inode, struct file *file)
4658*4882a593Smuzhiyun {
4659*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
4660*4882a593Smuzhiyun struct seq_file *m;
4661*4882a593Smuzhiyun int ret;
4662*4882a593Smuzhiyun
4663*4882a593Smuzhiyun ret = tracing_check_open_get_tr(tr);
4664*4882a593Smuzhiyun if (ret)
4665*4882a593Smuzhiyun return ret;
4666*4882a593Smuzhiyun
4667*4882a593Smuzhiyun ret = seq_open(file, &show_traces_seq_ops);
4668*4882a593Smuzhiyun if (ret) {
4669*4882a593Smuzhiyun trace_array_put(tr);
4670*4882a593Smuzhiyun return ret;
4671*4882a593Smuzhiyun }
4672*4882a593Smuzhiyun
4673*4882a593Smuzhiyun m = file->private_data;
4674*4882a593Smuzhiyun m->private = tr;
4675*4882a593Smuzhiyun
4676*4882a593Smuzhiyun return 0;
4677*4882a593Smuzhiyun }
4678*4882a593Smuzhiyun
4679*4882a593Smuzhiyun static int show_traces_release(struct inode *inode, struct file *file)
4680*4882a593Smuzhiyun {
4681*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
4682*4882a593Smuzhiyun
4683*4882a593Smuzhiyun trace_array_put(tr);
4684*4882a593Smuzhiyun return seq_release(inode, file);
4685*4882a593Smuzhiyun }
4686*4882a593Smuzhiyun
4687*4882a593Smuzhiyun static ssize_t
4688*4882a593Smuzhiyun tracing_write_stub(struct file *filp, const char __user *ubuf,
4689*4882a593Smuzhiyun size_t count, loff_t *ppos)
4690*4882a593Smuzhiyun {
4691*4882a593Smuzhiyun return count;
4692*4882a593Smuzhiyun }
4693*4882a593Smuzhiyun
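/*
 * Common llseek for tracing files: reads go through seq_file and use
 * seq_lseek(); write-only opens simply reset the file position to zero.
 */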
4694*4882a593Smuzhiyun loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4695*4882a593Smuzhiyun {
4696*4882a593Smuzhiyun int ret;
4697*4882a593Smuzhiyun
4698*4882a593Smuzhiyun if (file->f_mode & FMODE_READ)
4699*4882a593Smuzhiyun ret = seq_lseek(file, offset, whence);
4700*4882a593Smuzhiyun else
4701*4882a593Smuzhiyun file->f_pos = ret = 0;
4702*4882a593Smuzhiyun
4703*4882a593Smuzhiyun return ret;
4704*4882a593Smuzhiyun }
4705*4882a593Smuzhiyun
4706*4882a593Smuzhiyun static const struct file_operations tracing_fops = {
4707*4882a593Smuzhiyun .open = tracing_open,
4708*4882a593Smuzhiyun .read = seq_read,
4709*4882a593Smuzhiyun .write = tracing_write_stub,
4710*4882a593Smuzhiyun .llseek = tracing_lseek,
4711*4882a593Smuzhiyun .release = tracing_release,
4712*4882a593Smuzhiyun };
4713*4882a593Smuzhiyun
4714*4882a593Smuzhiyun static const struct file_operations show_traces_fops = {
4715*4882a593Smuzhiyun .open = show_traces_open,
4716*4882a593Smuzhiyun .read = seq_read,
4717*4882a593Smuzhiyun .llseek = seq_lseek,
4718*4882a593Smuzhiyun .release = show_traces_release,
4719*4882a593Smuzhiyun };
4720*4882a593Smuzhiyun
4721*4882a593Smuzhiyun static ssize_t
4722*4882a593Smuzhiyun tracing_cpumask_read(struct file *filp, char __user *ubuf,
4723*4882a593Smuzhiyun size_t count, loff_t *ppos)
4724*4882a593Smuzhiyun {
4725*4882a593Smuzhiyun struct trace_array *tr = file_inode(filp)->i_private;
4726*4882a593Smuzhiyun char *mask_str;
4727*4882a593Smuzhiyun int len;
4728*4882a593Smuzhiyun
4729*4882a593Smuzhiyun len = snprintf(NULL, 0, "%*pb\n",
4730*4882a593Smuzhiyun cpumask_pr_args(tr->tracing_cpumask)) + 1;
4731*4882a593Smuzhiyun mask_str = kmalloc(len, GFP_KERNEL);
4732*4882a593Smuzhiyun if (!mask_str)
4733*4882a593Smuzhiyun return -ENOMEM;
4734*4882a593Smuzhiyun
4735*4882a593Smuzhiyun len = snprintf(mask_str, len, "%*pb\n",
4736*4882a593Smuzhiyun cpumask_pr_args(tr->tracing_cpumask));
4737*4882a593Smuzhiyun if (len >= count) {
4738*4882a593Smuzhiyun count = -EINVAL;
4739*4882a593Smuzhiyun goto out_err;
4740*4882a593Smuzhiyun }
4741*4882a593Smuzhiyun count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4742*4882a593Smuzhiyun
4743*4882a593Smuzhiyun out_err:
4744*4882a593Smuzhiyun kfree(mask_str);
4745*4882a593Smuzhiyun
4746*4882a593Smuzhiyun return count;
4747*4882a593Smuzhiyun }
4748*4882a593Smuzhiyun
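/*
 * Apply a new tracing CPU mask to @tr. For each CPU whose bit is about to
 * flip, per-CPU recording is disabled or re-enabled and the per-CPU
 * "disabled" counter adjusted, all under max_lock with interrupts off,
 * before the new mask is copied into tr->tracing_cpumask.
 */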
4749*4882a593Smuzhiyun int tracing_set_cpumask(struct trace_array *tr,
4750*4882a593Smuzhiyun cpumask_var_t tracing_cpumask_new)
4751*4882a593Smuzhiyun {
4752*4882a593Smuzhiyun int cpu;
4753*4882a593Smuzhiyun
4754*4882a593Smuzhiyun if (!tr)
4755*4882a593Smuzhiyun return -EINVAL;
4756*4882a593Smuzhiyun
4757*4882a593Smuzhiyun local_irq_disable();
4758*4882a593Smuzhiyun arch_spin_lock(&tr->max_lock);
4759*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
4760*4882a593Smuzhiyun /*
4761*4882a593Smuzhiyun * Increase/decrease the disabled counter if we are
4762*4882a593Smuzhiyun * about to flip a bit in the cpumask:
4763*4882a593Smuzhiyun */
4764*4882a593Smuzhiyun if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4765*4882a593Smuzhiyun !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4766*4882a593Smuzhiyun atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4767*4882a593Smuzhiyun ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
4768*4882a593Smuzhiyun }
4769*4882a593Smuzhiyun if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4770*4882a593Smuzhiyun cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4771*4882a593Smuzhiyun atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4772*4882a593Smuzhiyun ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
4773*4882a593Smuzhiyun }
4774*4882a593Smuzhiyun }
4775*4882a593Smuzhiyun arch_spin_unlock(&tr->max_lock);
4776*4882a593Smuzhiyun local_irq_enable();
4777*4882a593Smuzhiyun
4778*4882a593Smuzhiyun cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4779*4882a593Smuzhiyun
4780*4882a593Smuzhiyun return 0;
4781*4882a593Smuzhiyun }
4782*4882a593Smuzhiyun
4783*4882a593Smuzhiyun static ssize_t
4784*4882a593Smuzhiyun tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4785*4882a593Smuzhiyun size_t count, loff_t *ppos)
4786*4882a593Smuzhiyun {
4787*4882a593Smuzhiyun struct trace_array *tr = file_inode(filp)->i_private;
4788*4882a593Smuzhiyun cpumask_var_t tracing_cpumask_new;
4789*4882a593Smuzhiyun int err;
4790*4882a593Smuzhiyun
4791*4882a593Smuzhiyun if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4792*4882a593Smuzhiyun return -ENOMEM;
4793*4882a593Smuzhiyun
4794*4882a593Smuzhiyun err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4795*4882a593Smuzhiyun if (err)
4796*4882a593Smuzhiyun goto err_free;
4797*4882a593Smuzhiyun
4798*4882a593Smuzhiyun err = tracing_set_cpumask(tr, tracing_cpumask_new);
4799*4882a593Smuzhiyun if (err)
4800*4882a593Smuzhiyun goto err_free;
4801*4882a593Smuzhiyun
4802*4882a593Smuzhiyun free_cpumask_var(tracing_cpumask_new);
4803*4882a593Smuzhiyun
4804*4882a593Smuzhiyun return count;
4805*4882a593Smuzhiyun
4806*4882a593Smuzhiyun err_free:
4807*4882a593Smuzhiyun free_cpumask_var(tracing_cpumask_new);
4808*4882a593Smuzhiyun
4809*4882a593Smuzhiyun return err;
4810*4882a593Smuzhiyun }
4811*4882a593Smuzhiyun
4812*4882a593Smuzhiyun static const struct file_operations tracing_cpumask_fops = {
4813*4882a593Smuzhiyun .open = tracing_open_generic_tr,
4814*4882a593Smuzhiyun .read = tracing_cpumask_read,
4815*4882a593Smuzhiyun .write = tracing_cpumask_write,
4816*4882a593Smuzhiyun .release = tracing_release_generic_tr,
4817*4882a593Smuzhiyun .llseek = generic_file_llseek,
4818*4882a593Smuzhiyun };
4819*4882a593Smuzhiyun
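/*
 * Show the state of every core trace option followed by the options of
 * the current tracer, one per line, prefixing disabled options with "no".
 */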
4820*4882a593Smuzhiyun static int tracing_trace_options_show(struct seq_file *m, void *v)
4821*4882a593Smuzhiyun {
4822*4882a593Smuzhiyun struct tracer_opt *trace_opts;
4823*4882a593Smuzhiyun struct trace_array *tr = m->private;
4824*4882a593Smuzhiyun u32 tracer_flags;
4825*4882a593Smuzhiyun int i;
4826*4882a593Smuzhiyun
4827*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
4828*4882a593Smuzhiyun tracer_flags = tr->current_trace->flags->val;
4829*4882a593Smuzhiyun trace_opts = tr->current_trace->flags->opts;
4830*4882a593Smuzhiyun
4831*4882a593Smuzhiyun for (i = 0; trace_options[i]; i++) {
4832*4882a593Smuzhiyun if (tr->trace_flags & (1 << i))
4833*4882a593Smuzhiyun seq_printf(m, "%s\n", trace_options[i]);
4834*4882a593Smuzhiyun else
4835*4882a593Smuzhiyun seq_printf(m, "no%s\n", trace_options[i]);
4836*4882a593Smuzhiyun }
4837*4882a593Smuzhiyun
4838*4882a593Smuzhiyun for (i = 0; trace_opts[i].name; i++) {
4839*4882a593Smuzhiyun if (tracer_flags & trace_opts[i].bit)
4840*4882a593Smuzhiyun seq_printf(m, "%s\n", trace_opts[i].name);
4841*4882a593Smuzhiyun else
4842*4882a593Smuzhiyun seq_printf(m, "no%s\n", trace_opts[i].name);
4843*4882a593Smuzhiyun }
4844*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
4845*4882a593Smuzhiyun
4846*4882a593Smuzhiyun return 0;
4847*4882a593Smuzhiyun }
4848*4882a593Smuzhiyun
4849*4882a593Smuzhiyun static int __set_tracer_option(struct trace_array *tr,
4850*4882a593Smuzhiyun struct tracer_flags *tracer_flags,
4851*4882a593Smuzhiyun struct tracer_opt *opts, int neg)
4852*4882a593Smuzhiyun {
4853*4882a593Smuzhiyun struct tracer *trace = tracer_flags->trace;
4854*4882a593Smuzhiyun int ret;
4855*4882a593Smuzhiyun
4856*4882a593Smuzhiyun ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4857*4882a593Smuzhiyun if (ret)
4858*4882a593Smuzhiyun return ret;
4859*4882a593Smuzhiyun
4860*4882a593Smuzhiyun if (neg)
4861*4882a593Smuzhiyun tracer_flags->val &= ~opts->bit;
4862*4882a593Smuzhiyun else
4863*4882a593Smuzhiyun tracer_flags->val |= opts->bit;
4864*4882a593Smuzhiyun return 0;
4865*4882a593Smuzhiyun }
4866*4882a593Smuzhiyun
4867*4882a593Smuzhiyun /* Try to assign a tracer specific option */
4868*4882a593Smuzhiyun static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4869*4882a593Smuzhiyun {
4870*4882a593Smuzhiyun struct tracer *trace = tr->current_trace;
4871*4882a593Smuzhiyun struct tracer_flags *tracer_flags = trace->flags;
4872*4882a593Smuzhiyun struct tracer_opt *opts = NULL;
4873*4882a593Smuzhiyun int i;
4874*4882a593Smuzhiyun
4875*4882a593Smuzhiyun for (i = 0; tracer_flags->opts[i].name; i++) {
4876*4882a593Smuzhiyun opts = &tracer_flags->opts[i];
4877*4882a593Smuzhiyun
4878*4882a593Smuzhiyun if (strcmp(cmp, opts->name) == 0)
4879*4882a593Smuzhiyun return __set_tracer_option(tr, trace->flags, opts, neg);
4880*4882a593Smuzhiyun }
4881*4882a593Smuzhiyun
4882*4882a593Smuzhiyun return -EINVAL;
4883*4882a593Smuzhiyun }
4884*4882a593Smuzhiyun
4885*4882a593Smuzhiyun /* Some tracers require overwrite to stay enabled */
4886*4882a593Smuzhiyun int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4887*4882a593Smuzhiyun {
4888*4882a593Smuzhiyun if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4889*4882a593Smuzhiyun return -1;
4890*4882a593Smuzhiyun
4891*4882a593Smuzhiyun return 0;
4892*4882a593Smuzhiyun }
4893*4882a593Smuzhiyun
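/*
 * Set or clear a single TRACE_ITER_* flag on @tr and apply its side
 * effects (cmdline/tgid recording, fork following, ring-buffer overwrite
 * mode, trace_printk), after giving the current tracer a chance to reject
 * the change via ->flag_changed().
 */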
4894*4882a593Smuzhiyun int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4895*4882a593Smuzhiyun {
4896*4882a593Smuzhiyun int *map;
4897*4882a593Smuzhiyun
4898*4882a593Smuzhiyun if ((mask == TRACE_ITER_RECORD_TGID) ||
4899*4882a593Smuzhiyun (mask == TRACE_ITER_RECORD_CMD))
4900*4882a593Smuzhiyun lockdep_assert_held(&event_mutex);
4901*4882a593Smuzhiyun
4902*4882a593Smuzhiyun /* do nothing if flag is already set */
4903*4882a593Smuzhiyun if (!!(tr->trace_flags & mask) == !!enabled)
4904*4882a593Smuzhiyun return 0;
4905*4882a593Smuzhiyun
4906*4882a593Smuzhiyun /* Give the tracer a chance to approve the change */
4907*4882a593Smuzhiyun if (tr->current_trace->flag_changed)
4908*4882a593Smuzhiyun if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4909*4882a593Smuzhiyun return -EINVAL;
4910*4882a593Smuzhiyun
4911*4882a593Smuzhiyun if (enabled)
4912*4882a593Smuzhiyun tr->trace_flags |= mask;
4913*4882a593Smuzhiyun else
4914*4882a593Smuzhiyun tr->trace_flags &= ~mask;
4915*4882a593Smuzhiyun
4916*4882a593Smuzhiyun if (mask == TRACE_ITER_RECORD_CMD)
4917*4882a593Smuzhiyun trace_event_enable_cmd_record(enabled);
4918*4882a593Smuzhiyun
4919*4882a593Smuzhiyun if (mask == TRACE_ITER_RECORD_TGID) {
4920*4882a593Smuzhiyun if (!tgid_map) {
4921*4882a593Smuzhiyun tgid_map_max = pid_max;
4922*4882a593Smuzhiyun map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
4923*4882a593Smuzhiyun GFP_KERNEL);
4924*4882a593Smuzhiyun
4925*4882a593Smuzhiyun /*
4926*4882a593Smuzhiyun * Pairs with smp_load_acquire() in
4927*4882a593Smuzhiyun * trace_find_tgid_ptr() to ensure that if it observes
4928*4882a593Smuzhiyun * the tgid_map we just allocated then it also observes
4929*4882a593Smuzhiyun * the corresponding tgid_map_max value.
4930*4882a593Smuzhiyun */
4931*4882a593Smuzhiyun smp_store_release(&tgid_map, map);
4932*4882a593Smuzhiyun }
4933*4882a593Smuzhiyun if (!tgid_map) {
4934*4882a593Smuzhiyun tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4935*4882a593Smuzhiyun return -ENOMEM;
4936*4882a593Smuzhiyun }
4937*4882a593Smuzhiyun
4938*4882a593Smuzhiyun trace_event_enable_tgid_record(enabled);
4939*4882a593Smuzhiyun }
4940*4882a593Smuzhiyun
4941*4882a593Smuzhiyun if (mask == TRACE_ITER_EVENT_FORK)
4942*4882a593Smuzhiyun trace_event_follow_fork(tr, enabled);
4943*4882a593Smuzhiyun
4944*4882a593Smuzhiyun if (mask == TRACE_ITER_FUNC_FORK)
4945*4882a593Smuzhiyun ftrace_pid_follow_fork(tr, enabled);
4946*4882a593Smuzhiyun
4947*4882a593Smuzhiyun if (mask == TRACE_ITER_OVERWRITE) {
4948*4882a593Smuzhiyun ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
4949*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
4950*4882a593Smuzhiyun ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4951*4882a593Smuzhiyun #endif
4952*4882a593Smuzhiyun }
4953*4882a593Smuzhiyun
4954*4882a593Smuzhiyun if (mask == TRACE_ITER_PRINTK) {
4955*4882a593Smuzhiyun trace_printk_start_stop_comm(enabled);
4956*4882a593Smuzhiyun trace_printk_control(enabled);
4957*4882a593Smuzhiyun }
4958*4882a593Smuzhiyun
4959*4882a593Smuzhiyun return 0;
4960*4882a593Smuzhiyun }
4961*4882a593Smuzhiyun
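/*
 * Parse a single option string ("name" or "noname") and apply it, first
 * against the core trace_options table and, failing that, against the
 * current tracer's own options.
 */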
4962*4882a593Smuzhiyun int trace_set_options(struct trace_array *tr, char *option)
4963*4882a593Smuzhiyun {
4964*4882a593Smuzhiyun char *cmp;
4965*4882a593Smuzhiyun int neg = 0;
4966*4882a593Smuzhiyun int ret;
4967*4882a593Smuzhiyun size_t orig_len = strlen(option);
4968*4882a593Smuzhiyun int len;
4969*4882a593Smuzhiyun
4970*4882a593Smuzhiyun cmp = strstrip(option);
4971*4882a593Smuzhiyun
4972*4882a593Smuzhiyun len = str_has_prefix(cmp, "no");
4973*4882a593Smuzhiyun if (len)
4974*4882a593Smuzhiyun neg = 1;
4975*4882a593Smuzhiyun
4976*4882a593Smuzhiyun cmp += len;
4977*4882a593Smuzhiyun
4978*4882a593Smuzhiyun mutex_lock(&event_mutex);
4979*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
4980*4882a593Smuzhiyun
4981*4882a593Smuzhiyun ret = match_string(trace_options, -1, cmp);
4982*4882a593Smuzhiyun /* If no option could be set, test the specific tracer options */
4983*4882a593Smuzhiyun if (ret < 0)
4984*4882a593Smuzhiyun ret = set_tracer_option(tr, cmp, neg);
4985*4882a593Smuzhiyun else
4986*4882a593Smuzhiyun ret = set_tracer_flag(tr, 1 << ret, !neg);
4987*4882a593Smuzhiyun
4988*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
4989*4882a593Smuzhiyun mutex_unlock(&event_mutex);
4990*4882a593Smuzhiyun
4991*4882a593Smuzhiyun /*
4992*4882a593Smuzhiyun * If the first trailing whitespace is replaced with '\0' by strstrip,
4993*4882a593Smuzhiyun * turn it back into a space.
4994*4882a593Smuzhiyun */
4995*4882a593Smuzhiyun if (orig_len > strlen(option))
4996*4882a593Smuzhiyun option[strlen(option)] = ' ';
4997*4882a593Smuzhiyun
4998*4882a593Smuzhiyun return ret;
4999*4882a593Smuzhiyun }
5000*4882a593Smuzhiyun
5001*4882a593Smuzhiyun static void __init apply_trace_boot_options(void)
5002*4882a593Smuzhiyun {
5003*4882a593Smuzhiyun char *buf = trace_boot_options_buf;
5004*4882a593Smuzhiyun char *option;
5005*4882a593Smuzhiyun
5006*4882a593Smuzhiyun while (true) {
5007*4882a593Smuzhiyun option = strsep(&buf, ",");
5008*4882a593Smuzhiyun
5009*4882a593Smuzhiyun if (!option)
5010*4882a593Smuzhiyun break;
5011*4882a593Smuzhiyun
5012*4882a593Smuzhiyun if (*option)
5013*4882a593Smuzhiyun trace_set_options(&global_trace, option);
5014*4882a593Smuzhiyun
5015*4882a593Smuzhiyun /* Put back the comma to allow this to be called again */
5016*4882a593Smuzhiyun if (buf)
5017*4882a593Smuzhiyun *(buf - 1) = ',';
5018*4882a593Smuzhiyun }
5019*4882a593Smuzhiyun }
5020*4882a593Smuzhiyun
5021*4882a593Smuzhiyun static ssize_t
5022*4882a593Smuzhiyun tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5023*4882a593Smuzhiyun size_t cnt, loff_t *ppos)
5024*4882a593Smuzhiyun {
5025*4882a593Smuzhiyun struct seq_file *m = filp->private_data;
5026*4882a593Smuzhiyun struct trace_array *tr = m->private;
5027*4882a593Smuzhiyun char buf[64];
5028*4882a593Smuzhiyun int ret;
5029*4882a593Smuzhiyun
5030*4882a593Smuzhiyun if (cnt >= sizeof(buf))
5031*4882a593Smuzhiyun return -EINVAL;
5032*4882a593Smuzhiyun
5033*4882a593Smuzhiyun if (copy_from_user(buf, ubuf, cnt))
5034*4882a593Smuzhiyun return -EFAULT;
5035*4882a593Smuzhiyun
5036*4882a593Smuzhiyun buf[cnt] = 0;
5037*4882a593Smuzhiyun
5038*4882a593Smuzhiyun ret = trace_set_options(tr, buf);
5039*4882a593Smuzhiyun if (ret < 0)
5040*4882a593Smuzhiyun return ret;
5041*4882a593Smuzhiyun
5042*4882a593Smuzhiyun *ppos += cnt;
5043*4882a593Smuzhiyun
5044*4882a593Smuzhiyun return cnt;
5045*4882a593Smuzhiyun }
5046*4882a593Smuzhiyun
5047*4882a593Smuzhiyun static int tracing_trace_options_open(struct inode *inode, struct file *file)
5048*4882a593Smuzhiyun {
5049*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
5050*4882a593Smuzhiyun int ret;
5051*4882a593Smuzhiyun
5052*4882a593Smuzhiyun ret = tracing_check_open_get_tr(tr);
5053*4882a593Smuzhiyun if (ret)
5054*4882a593Smuzhiyun return ret;
5055*4882a593Smuzhiyun
5056*4882a593Smuzhiyun ret = single_open(file, tracing_trace_options_show, inode->i_private);
5057*4882a593Smuzhiyun if (ret < 0)
5058*4882a593Smuzhiyun trace_array_put(tr);
5059*4882a593Smuzhiyun
5060*4882a593Smuzhiyun return ret;
5061*4882a593Smuzhiyun }
5062*4882a593Smuzhiyun
5063*4882a593Smuzhiyun static const struct file_operations tracing_iter_fops = {
5064*4882a593Smuzhiyun .open = tracing_trace_options_open,
5065*4882a593Smuzhiyun .read = seq_read,
5066*4882a593Smuzhiyun .llseek = seq_lseek,
5067*4882a593Smuzhiyun .release = tracing_single_release_tr,
5068*4882a593Smuzhiyun .write = tracing_trace_options_write,
5069*4882a593Smuzhiyun };
5070*4882a593Smuzhiyun
5071*4882a593Smuzhiyun static const char readme_msg[] =
5072*4882a593Smuzhiyun "tracing mini-HOWTO:\n\n"
5073*4882a593Smuzhiyun "# echo 0 > tracing_on : quick way to disable tracing\n"
5074*4882a593Smuzhiyun "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5075*4882a593Smuzhiyun " Important files:\n"
5076*4882a593Smuzhiyun " trace\t\t\t- The static contents of the buffer\n"
5077*4882a593Smuzhiyun "\t\t\t To clear the buffer write into this file: echo > trace\n"
5078*4882a593Smuzhiyun " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5079*4882a593Smuzhiyun " current_tracer\t- function and latency tracers\n"
5080*4882a593Smuzhiyun " available_tracers\t- list of configured tracers for current_tracer\n"
5081*4882a593Smuzhiyun " error_log\t- error log for failed commands (that support it)\n"
5082*4882a593Smuzhiyun " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5083*4882a593Smuzhiyun " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5084*4882a593Smuzhiyun " trace_clock\t\t- change the clock used to order events\n"
5085*4882a593Smuzhiyun " local: Per cpu clock but may not be synced across CPUs\n"
5086*4882a593Smuzhiyun " global: Synced across CPUs but slows tracing down.\n"
5087*4882a593Smuzhiyun " counter: Not a clock, but just an increment\n"
5088*4882a593Smuzhiyun " uptime: Jiffy counter from time of boot\n"
5089*4882a593Smuzhiyun " perf: Same clock that perf events use\n"
5090*4882a593Smuzhiyun #ifdef CONFIG_X86_64
5091*4882a593Smuzhiyun " x86-tsc: TSC cycle counter\n"
5092*4882a593Smuzhiyun #endif
5093*4882a593Smuzhiyun "\n timestamp_mode\t- view the mode used to timestamp events\n"
5094*4882a593Smuzhiyun " delta: Delta difference against a buffer-wide timestamp\n"
5095*4882a593Smuzhiyun " absolute: Absolute (standalone) timestamp\n"
5096*4882a593Smuzhiyun "\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
5097*4882a593Smuzhiyun "\n trace_marker_raw\t\t- Writes into this file write binary data into the kernel buffer\n"
5098*4882a593Smuzhiyun " tracing_cpumask\t- Limit which CPUs to trace\n"
5099*4882a593Smuzhiyun " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5100*4882a593Smuzhiyun "\t\t\t Remove sub-buffer with rmdir\n"
5101*4882a593Smuzhiyun " trace_options\t\t- Set format or modify how tracing happens\n"
5102*4882a593Smuzhiyun "\t\t\t Disable an option by prefixing 'no' to the\n"
5103*4882a593Smuzhiyun "\t\t\t option name\n"
5104*4882a593Smuzhiyun " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5105*4882a593Smuzhiyun #ifdef CONFIG_DYNAMIC_FTRACE
5106*4882a593Smuzhiyun "\n available_filter_functions - list of functions that can be filtered on\n"
5107*4882a593Smuzhiyun " set_ftrace_filter\t- echo function name in here to only trace these\n"
5108*4882a593Smuzhiyun "\t\t\t functions\n"
5109*4882a593Smuzhiyun "\t accepts: func_full_name or glob-matching-pattern\n"
5110*4882a593Smuzhiyun "\t modules: Can select a group via module\n"
5111*4882a593Smuzhiyun "\t Format: :mod:<module-name>\n"
5112*4882a593Smuzhiyun "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5113*4882a593Smuzhiyun "\t triggers: a command to perform when function is hit\n"
5114*4882a593Smuzhiyun "\t Format: <function>:<trigger>[:count]\n"
5115*4882a593Smuzhiyun "\t trigger: traceon, traceoff\n"
5116*4882a593Smuzhiyun "\t\t enable_event:<system>:<event>\n"
5117*4882a593Smuzhiyun "\t\t disable_event:<system>:<event>\n"
5118*4882a593Smuzhiyun #ifdef CONFIG_STACKTRACE
5119*4882a593Smuzhiyun "\t\t stacktrace\n"
5120*4882a593Smuzhiyun #endif
5121*4882a593Smuzhiyun #ifdef CONFIG_TRACER_SNAPSHOT
5122*4882a593Smuzhiyun "\t\t snapshot\n"
5123*4882a593Smuzhiyun #endif
5124*4882a593Smuzhiyun "\t\t dump\n"
5125*4882a593Smuzhiyun "\t\t cpudump\n"
5126*4882a593Smuzhiyun "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5127*4882a593Smuzhiyun "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5128*4882a593Smuzhiyun "\t The first one will disable tracing every time do_fault is hit\n"
5129*4882a593Smuzhiyun "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5130*4882a593Smuzhiyun "\t The first time do_trap is hit and it disables tracing, the\n"
5131*4882a593Smuzhiyun "\t counter will decrement to 2. If tracing is already disabled,\n"
5132*4882a593Smuzhiyun "\t the counter will not decrement. It only decrements when the\n"
5133*4882a593Smuzhiyun "\t trigger did work\n"
5134*4882a593Smuzhiyun "\t To remove trigger without count:\n"
5135*4882a593Smuzhiyun "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
5136*4882a593Smuzhiyun "\t To remove trigger with a count:\n"
5137*4882a593Smuzhiyun "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
5138*4882a593Smuzhiyun " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5139*4882a593Smuzhiyun "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5140*4882a593Smuzhiyun "\t modules: Can select a group via module command :mod:\n"
5141*4882a593Smuzhiyun "\t Does not accept triggers\n"
5142*4882a593Smuzhiyun #endif /* CONFIG_DYNAMIC_FTRACE */
5143*4882a593Smuzhiyun #ifdef CONFIG_FUNCTION_TRACER
5144*4882a593Smuzhiyun " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5145*4882a593Smuzhiyun "\t\t (function)\n"
5146*4882a593Smuzhiyun " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5147*4882a593Smuzhiyun "\t\t (function)\n"
5148*4882a593Smuzhiyun #endif
5149*4882a593Smuzhiyun #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5150*4882a593Smuzhiyun " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5151*4882a593Smuzhiyun " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5152*4882a593Smuzhiyun " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5153*4882a593Smuzhiyun #endif
5154*4882a593Smuzhiyun #ifdef CONFIG_TRACER_SNAPSHOT
5155*4882a593Smuzhiyun "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5156*4882a593Smuzhiyun "\t\t\t snapshot buffer. Read the contents for more\n"
5157*4882a593Smuzhiyun "\t\t\t information\n"
5158*4882a593Smuzhiyun #endif
5159*4882a593Smuzhiyun #ifdef CONFIG_STACK_TRACER
5160*4882a593Smuzhiyun " stack_trace\t\t- Shows the max stack trace when active\n"
5161*4882a593Smuzhiyun " stack_max_size\t- Shows current max stack size that was traced\n"
5162*4882a593Smuzhiyun "\t\t\t Write into this file to reset the max size (trigger a\n"
5163*4882a593Smuzhiyun "\t\t\t new trace)\n"
5164*4882a593Smuzhiyun #ifdef CONFIG_DYNAMIC_FTRACE
5165*4882a593Smuzhiyun " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5166*4882a593Smuzhiyun "\t\t\t traces\n"
5167*4882a593Smuzhiyun #endif
5168*4882a593Smuzhiyun #endif /* CONFIG_STACK_TRACER */
5169*4882a593Smuzhiyun #ifdef CONFIG_DYNAMIC_EVENTS
5170*4882a593Smuzhiyun " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5171*4882a593Smuzhiyun "\t\t\t Write into this file to define/undefine new trace events.\n"
5172*4882a593Smuzhiyun #endif
5173*4882a593Smuzhiyun #ifdef CONFIG_KPROBE_EVENTS
5174*4882a593Smuzhiyun " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5175*4882a593Smuzhiyun "\t\t\t Write into this file to define/undefine new trace events.\n"
5176*4882a593Smuzhiyun #endif
5177*4882a593Smuzhiyun #ifdef CONFIG_UPROBE_EVENTS
5178*4882a593Smuzhiyun " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5179*4882a593Smuzhiyun "\t\t\t Write into this file to define/undefine new trace events.\n"
5180*4882a593Smuzhiyun #endif
5181*4882a593Smuzhiyun #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5182*4882a593Smuzhiyun "\t accepts: event-definitions (one definition per line)\n"
5183*4882a593Smuzhiyun "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5184*4882a593Smuzhiyun "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5185*4882a593Smuzhiyun #ifdef CONFIG_HIST_TRIGGERS
5186*4882a593Smuzhiyun "\t s:[synthetic/]<event> <field> [<field>]\n"
5187*4882a593Smuzhiyun #endif
5188*4882a593Smuzhiyun "\t -:[<group>/]<event>\n"
5189*4882a593Smuzhiyun #ifdef CONFIG_KPROBE_EVENTS
5190*4882a593Smuzhiyun "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5191*4882a593Smuzhiyun "\t place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5192*4882a593Smuzhiyun #endif
5193*4882a593Smuzhiyun #ifdef CONFIG_UPROBE_EVENTS
5194*4882a593Smuzhiyun "\t place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5195*4882a593Smuzhiyun #endif
5196*4882a593Smuzhiyun "\t args: <name>=fetcharg[:type]\n"
5197*4882a593Smuzhiyun "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5198*4882a593Smuzhiyun #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5199*4882a593Smuzhiyun "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5200*4882a593Smuzhiyun #else
5201*4882a593Smuzhiyun "\t $stack<index>, $stack, $retval, $comm,\n"
5202*4882a593Smuzhiyun #endif
5203*4882a593Smuzhiyun "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5204*4882a593Smuzhiyun "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5205*4882a593Smuzhiyun "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5206*4882a593Smuzhiyun "\t <type>\\[<array-size>\\]\n"
5207*4882a593Smuzhiyun #ifdef CONFIG_HIST_TRIGGERS
5208*4882a593Smuzhiyun "\t field: <stype> <name>;\n"
5209*4882a593Smuzhiyun "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5210*4882a593Smuzhiyun "\t [unsigned] char/int/long\n"
5211*4882a593Smuzhiyun #endif
5212*4882a593Smuzhiyun #endif
5213*4882a593Smuzhiyun " events/\t\t- Directory containing all trace event subsystems:\n"
5214*4882a593Smuzhiyun " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5215*4882a593Smuzhiyun " events/<system>/\t- Directory containing all trace events for <system>:\n"
5216*4882a593Smuzhiyun " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5217*4882a593Smuzhiyun "\t\t\t events\n"
5218*4882a593Smuzhiyun " filter\t\t- If set, only events passing filter are traced\n"
5219*4882a593Smuzhiyun " events/<system>/<event>/\t- Directory containing control files for\n"
5220*4882a593Smuzhiyun "\t\t\t <event>:\n"
5221*4882a593Smuzhiyun " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5222*4882a593Smuzhiyun " filter\t\t- If set, only events passing filter are traced\n"
5223*4882a593Smuzhiyun " trigger\t\t- If set, a command to perform when event is hit\n"
5224*4882a593Smuzhiyun "\t Format: <trigger>[:count][if <filter>]\n"
5225*4882a593Smuzhiyun "\t trigger: traceon, traceoff\n"
5226*4882a593Smuzhiyun "\t enable_event:<system>:<event>\n"
5227*4882a593Smuzhiyun "\t disable_event:<system>:<event>\n"
5228*4882a593Smuzhiyun #ifdef CONFIG_HIST_TRIGGERS
5229*4882a593Smuzhiyun "\t enable_hist:<system>:<event>\n"
5230*4882a593Smuzhiyun "\t disable_hist:<system>:<event>\n"
5231*4882a593Smuzhiyun #endif
5232*4882a593Smuzhiyun #ifdef CONFIG_STACKTRACE
5233*4882a593Smuzhiyun "\t\t stacktrace\n"
5234*4882a593Smuzhiyun #endif
5235*4882a593Smuzhiyun #ifdef CONFIG_TRACER_SNAPSHOT
5236*4882a593Smuzhiyun "\t\t snapshot\n"
5237*4882a593Smuzhiyun #endif
5238*4882a593Smuzhiyun #ifdef CONFIG_HIST_TRIGGERS
5239*4882a593Smuzhiyun "\t\t hist (see below)\n"
5240*4882a593Smuzhiyun #endif
5241*4882a593Smuzhiyun "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5242*4882a593Smuzhiyun "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5243*4882a593Smuzhiyun "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5244*4882a593Smuzhiyun "\t events/block/block_unplug/trigger\n"
5245*4882a593Smuzhiyun "\t The first disables tracing every time block_unplug is hit.\n"
5246*4882a593Smuzhiyun "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5247*4882a593Smuzhiyun "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5248*4882a593Smuzhiyun "\t is hit and the 'nr_rq' event field has a value greater than 1.\n"
5249*4882a593Smuzhiyun "\t Like function triggers, the counter is only decremented if it\n"
5250*4882a593Smuzhiyun "\t enabled or disabled tracing.\n"
5251*4882a593Smuzhiyun "\t To remove a trigger without a count:\n"
5252*4882a593Smuzhiyun "\t echo '!<trigger>' > <system>/<event>/trigger\n"
5253*4882a593Smuzhiyun "\t To remove a trigger with a count:\n"
5254*4882a593Smuzhiyun "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
5255*4882a593Smuzhiyun "\t Filters can be ignored when removing a trigger.\n"
5256*4882a593Smuzhiyun #ifdef CONFIG_HIST_TRIGGERS
5257*4882a593Smuzhiyun " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5258*4882a593Smuzhiyun "\t Format: hist:keys=<field1[,field2,...]>\n"
5259*4882a593Smuzhiyun "\t [:values=<field1[,field2,...]>]\n"
5260*4882a593Smuzhiyun "\t [:sort=<field1[,field2,...]>]\n"
5261*4882a593Smuzhiyun "\t [:size=#entries]\n"
5262*4882a593Smuzhiyun "\t [:pause][:continue][:clear]\n"
5263*4882a593Smuzhiyun "\t [:name=histname1]\n"
5264*4882a593Smuzhiyun "\t [:<handler>.<action>]\n"
5265*4882a593Smuzhiyun "\t [if <filter>]\n\n"
5266*4882a593Smuzhiyun "\t Note, special fields can be used as well:\n"
5267*4882a593Smuzhiyun "\t common_timestamp - to record current timestamp\n"
5268*4882a593Smuzhiyun "\t common_cpu - to record the CPU the event happened on\n"
5269*4882a593Smuzhiyun "\n"
5270*4882a593Smuzhiyun "\t When a matching event is hit, an entry is added to a hash\n"
5271*4882a593Smuzhiyun "\t table using the key(s) and value(s) named, and the value of a\n"
5272*4882a593Smuzhiyun "\t sum called 'hitcount' is incremented. Keys and values\n"
5273*4882a593Smuzhiyun "\t correspond to fields in the event's format description. Keys\n"
5274*4882a593Smuzhiyun "\t can be any field, or the special string 'stacktrace'.\n"
5275*4882a593Smuzhiyun "\t Compound keys consisting of up to two fields can be specified\n"
5276*4882a593Smuzhiyun "\t by the 'keys' keyword. Values must correspond to numeric\n"
5277*4882a593Smuzhiyun "\t fields. Sort keys consisting of up to two fields can be\n"
5278*4882a593Smuzhiyun "\t specified using the 'sort' keyword. The sort direction can\n"
5279*4882a593Smuzhiyun "\t be modified by appending '.descending' or '.ascending' to a\n"
5280*4882a593Smuzhiyun "\t sort field. The 'size' parameter can be used to specify more\n"
5281*4882a593Smuzhiyun "\t or fewer than the default 2048 entries for the hashtable size.\n"
5282*4882a593Smuzhiyun "\t If a hist trigger is given a name using the 'name' parameter,\n"
5283*4882a593Smuzhiyun "\t its histogram data will be shared with other triggers of the\n"
5284*4882a593Smuzhiyun "\t same name, and trigger hits will update this common data.\n\n"
5285*4882a593Smuzhiyun "\t Reading the 'hist' file for the event will dump the hash\n"
5286*4882a593Smuzhiyun "\t table in its entirety to stdout. If there are multiple hist\n"
5287*4882a593Smuzhiyun "\t triggers attached to an event, there will be a table for each\n"
5288*4882a593Smuzhiyun "\t trigger in the output. The table displayed for a named\n"
5289*4882a593Smuzhiyun "\t trigger will be the same as any other instance having the\n"
5290*4882a593Smuzhiyun "\t same name. The default format used to display a given field\n"
5291*4882a593Smuzhiyun "\t can be modified by appending any of the following modifiers\n"
5292*4882a593Smuzhiyun "\t to the field name, as applicable:\n\n"
5293*4882a593Smuzhiyun "\t .hex display a number as a hex value\n"
5294*4882a593Smuzhiyun "\t .sym display an address as a symbol\n"
5295*4882a593Smuzhiyun "\t .sym-offset display an address as a symbol and offset\n"
5296*4882a593Smuzhiyun "\t .execname display a common_pid as a program name\n"
5297*4882a593Smuzhiyun "\t .syscall display a syscall id as a syscall name\n"
5298*4882a593Smuzhiyun "\t .log2 display log2 value rather than raw number\n"
5299*4882a593Smuzhiyun "\t .usecs display a common_timestamp in microseconds\n\n"
5300*4882a593Smuzhiyun "\t The 'pause' parameter can be used to pause an existing hist\n"
5301*4882a593Smuzhiyun "\t trigger or to start a hist trigger but not log any events\n"
5302*4882a593Smuzhiyun "\t until told to do so. 'continue' can be used to start or\n"
5303*4882a593Smuzhiyun "\t restart a paused hist trigger.\n\n"
5304*4882a593Smuzhiyun "\t The 'clear' parameter will clear the contents of a running\n"
5305*4882a593Smuzhiyun "\t hist trigger and leave its current paused/active state\n"
5306*4882a593Smuzhiyun "\t unchanged.\n\n"
5307*4882a593Smuzhiyun "\t The enable_hist and disable_hist triggers can be used to\n"
5308*4882a593Smuzhiyun "\t have one event conditionally start and stop another event's\n"
5309*4882a593Smuzhiyun "\t already-attached hist trigger. The syntax is analogous to\n"
5310*4882a593Smuzhiyun "\t the enable_event and disable_event triggers.\n\n"
5311*4882a593Smuzhiyun "\t Hist trigger handlers and actions are executed whenever a\n"
5312*4882a593Smuzhiyun "\t histogram entry is added or updated. They take the form:\n\n"
5313*4882a593Smuzhiyun "\t <handler>.<action>\n\n"
5314*4882a593Smuzhiyun "\t The available handlers are:\n\n"
5315*4882a593Smuzhiyun "\t onmatch(matching.event) - invoke on addition or update\n"
5316*4882a593Smuzhiyun "\t onmax(var) - invoke if var exceeds current max\n"
5317*4882a593Smuzhiyun "\t onchange(var) - invoke action if var changes\n\n"
5318*4882a593Smuzhiyun "\t The available actions are:\n\n"
5319*4882a593Smuzhiyun "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5320*4882a593Smuzhiyun "\t save(field,...) - save current event fields\n"
5321*4882a593Smuzhiyun #ifdef CONFIG_TRACER_SNAPSHOT
5322*4882a593Smuzhiyun "\t snapshot() - snapshot the trace buffer\n\n"
5323*4882a593Smuzhiyun #endif
5324*4882a593Smuzhiyun #ifdef CONFIG_SYNTH_EVENTS
5325*4882a593Smuzhiyun " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5326*4882a593Smuzhiyun "\t Write into this file to define/undefine new synthetic events.\n"
5327*4882a593Smuzhiyun "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5328*4882a593Smuzhiyun #endif
5329*4882a593Smuzhiyun #endif
5330*4882a593Smuzhiyun ;
5331*4882a593Smuzhiyun
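/*
 * Illustrative only: one way the hist-trigger syntax described in
 * readme_msg above can be combined. The event (sched/sched_switch) and
 * fields are just an example, not something this file defines:
 *
 *   # echo 'hist:keys=common_pid.execname:values=hitcount:sort=hitcount.descending' \
 *          > events/sched/sched_switch/trigger
 *   # cat events/sched/sched_switch/hist
 */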
5332*4882a593Smuzhiyun static ssize_t
5333*4882a593Smuzhiyun tracing_readme_read(struct file *filp, char __user *ubuf,
5334*4882a593Smuzhiyun size_t cnt, loff_t *ppos)
5335*4882a593Smuzhiyun {
5336*4882a593Smuzhiyun return simple_read_from_buffer(ubuf, cnt, ppos,
5337*4882a593Smuzhiyun readme_msg, strlen(readme_msg));
5338*4882a593Smuzhiyun }
5339*4882a593Smuzhiyun
5340*4882a593Smuzhiyun static const struct file_operations tracing_readme_fops = {
5341*4882a593Smuzhiyun .open = tracing_open_generic,
5342*4882a593Smuzhiyun .read = tracing_readme_read,
5343*4882a593Smuzhiyun .llseek = generic_file_llseek,
5344*4882a593Smuzhiyun };
5345*4882a593Smuzhiyun
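/*
 * seq_file interface for the saved pid->tgid map: the iterator position
 * is a PID, trace_find_tgid_ptr() returns the matching tgid slot, and
 * entries whose tgid is still zero are skipped when shown.
 */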
5346*4882a593Smuzhiyun static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5347*4882a593Smuzhiyun {
5348*4882a593Smuzhiyun int pid = ++(*pos);
5349*4882a593Smuzhiyun
5350*4882a593Smuzhiyun return trace_find_tgid_ptr(pid);
5351*4882a593Smuzhiyun }
5352*4882a593Smuzhiyun
5353*4882a593Smuzhiyun static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5354*4882a593Smuzhiyun {
5355*4882a593Smuzhiyun int pid = *pos;
5356*4882a593Smuzhiyun
5357*4882a593Smuzhiyun return trace_find_tgid_ptr(pid);
5358*4882a593Smuzhiyun }
5359*4882a593Smuzhiyun
5360*4882a593Smuzhiyun static void saved_tgids_stop(struct seq_file *m, void *v)
5361*4882a593Smuzhiyun {
5362*4882a593Smuzhiyun }
5363*4882a593Smuzhiyun
5364*4882a593Smuzhiyun static int saved_tgids_show(struct seq_file *m, void *v)
5365*4882a593Smuzhiyun {
5366*4882a593Smuzhiyun int *entry = (int *)v;
5367*4882a593Smuzhiyun int pid = entry - tgid_map;
5368*4882a593Smuzhiyun int tgid = *entry;
5369*4882a593Smuzhiyun
5370*4882a593Smuzhiyun if (tgid == 0)
5371*4882a593Smuzhiyun return SEQ_SKIP;
5372*4882a593Smuzhiyun
5373*4882a593Smuzhiyun seq_printf(m, "%d %d\n", pid, tgid);
5374*4882a593Smuzhiyun return 0;
5375*4882a593Smuzhiyun }
5376*4882a593Smuzhiyun
5377*4882a593Smuzhiyun static const struct seq_operations tracing_saved_tgids_seq_ops = {
5378*4882a593Smuzhiyun .start = saved_tgids_start,
5379*4882a593Smuzhiyun .stop = saved_tgids_stop,
5380*4882a593Smuzhiyun .next = saved_tgids_next,
5381*4882a593Smuzhiyun .show = saved_tgids_show,
5382*4882a593Smuzhiyun };
5383*4882a593Smuzhiyun
5384*4882a593Smuzhiyun static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5385*4882a593Smuzhiyun {
5386*4882a593Smuzhiyun int ret;
5387*4882a593Smuzhiyun
5388*4882a593Smuzhiyun ret = tracing_check_open_get_tr(NULL);
5389*4882a593Smuzhiyun if (ret)
5390*4882a593Smuzhiyun return ret;
5391*4882a593Smuzhiyun
5392*4882a593Smuzhiyun return seq_open(filp, &tracing_saved_tgids_seq_ops);
5393*4882a593Smuzhiyun }
5394*4882a593Smuzhiyun
5395*4882a593Smuzhiyun
5396*4882a593Smuzhiyun static const struct file_operations tracing_saved_tgids_fops = {
5397*4882a593Smuzhiyun .open = tracing_saved_tgids_open,
5398*4882a593Smuzhiyun .read = seq_read,
5399*4882a593Smuzhiyun .llseek = seq_lseek,
5400*4882a593Smuzhiyun .release = seq_release,
5401*4882a593Smuzhiyun };
5402*4882a593Smuzhiyun
5403*4882a593Smuzhiyun static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5404*4882a593Smuzhiyun {
5405*4882a593Smuzhiyun unsigned int *ptr = v;
5406*4882a593Smuzhiyun
5407*4882a593Smuzhiyun if (*pos || m->count)
5408*4882a593Smuzhiyun ptr++;
5409*4882a593Smuzhiyun
5410*4882a593Smuzhiyun (*pos)++;
5411*4882a593Smuzhiyun
5412*4882a593Smuzhiyun for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5413*4882a593Smuzhiyun ptr++) {
5414*4882a593Smuzhiyun if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5415*4882a593Smuzhiyun continue;
5416*4882a593Smuzhiyun
5417*4882a593Smuzhiyun return ptr;
5418*4882a593Smuzhiyun }
5419*4882a593Smuzhiyun
5420*4882a593Smuzhiyun return NULL;
5421*4882a593Smuzhiyun }
5422*4882a593Smuzhiyun
5423*4882a593Smuzhiyun static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5424*4882a593Smuzhiyun {
5425*4882a593Smuzhiyun void *v;
5426*4882a593Smuzhiyun loff_t l = 0;
5427*4882a593Smuzhiyun
5428*4882a593Smuzhiyun preempt_disable();
5429*4882a593Smuzhiyun arch_spin_lock(&trace_cmdline_lock);
5430*4882a593Smuzhiyun
5431*4882a593Smuzhiyun v = &savedcmd->map_cmdline_to_pid[0];
5432*4882a593Smuzhiyun while (l <= *pos) {
5433*4882a593Smuzhiyun v = saved_cmdlines_next(m, v, &l);
5434*4882a593Smuzhiyun if (!v)
5435*4882a593Smuzhiyun return NULL;
5436*4882a593Smuzhiyun }
5437*4882a593Smuzhiyun
5438*4882a593Smuzhiyun return v;
5439*4882a593Smuzhiyun }
5440*4882a593Smuzhiyun
5441*4882a593Smuzhiyun static void saved_cmdlines_stop(struct seq_file *m, void *v)
5442*4882a593Smuzhiyun {
5443*4882a593Smuzhiyun arch_spin_unlock(&trace_cmdline_lock);
5444*4882a593Smuzhiyun preempt_enable();
5445*4882a593Smuzhiyun }
5446*4882a593Smuzhiyun
5447*4882a593Smuzhiyun static int saved_cmdlines_show(struct seq_file *m, void *v)
5448*4882a593Smuzhiyun {
5449*4882a593Smuzhiyun char buf[TASK_COMM_LEN];
5450*4882a593Smuzhiyun unsigned int *pid = v;
5451*4882a593Smuzhiyun
5452*4882a593Smuzhiyun __trace_find_cmdline(*pid, buf);
5453*4882a593Smuzhiyun seq_printf(m, "%d %s\n", *pid, buf);
5454*4882a593Smuzhiyun return 0;
5455*4882a593Smuzhiyun }
5456*4882a593Smuzhiyun
5457*4882a593Smuzhiyun static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5458*4882a593Smuzhiyun .start = saved_cmdlines_start,
5459*4882a593Smuzhiyun .next = saved_cmdlines_next,
5460*4882a593Smuzhiyun .stop = saved_cmdlines_stop,
5461*4882a593Smuzhiyun .show = saved_cmdlines_show,
5462*4882a593Smuzhiyun };
5463*4882a593Smuzhiyun
5464*4882a593Smuzhiyun static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5465*4882a593Smuzhiyun {
5466*4882a593Smuzhiyun int ret;
5467*4882a593Smuzhiyun
5468*4882a593Smuzhiyun ret = tracing_check_open_get_tr(NULL);
5469*4882a593Smuzhiyun if (ret)
5470*4882a593Smuzhiyun return ret;
5471*4882a593Smuzhiyun
5472*4882a593Smuzhiyun return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5473*4882a593Smuzhiyun }
5474*4882a593Smuzhiyun
5475*4882a593Smuzhiyun static const struct file_operations tracing_saved_cmdlines_fops = {
5476*4882a593Smuzhiyun .open = tracing_saved_cmdlines_open,
5477*4882a593Smuzhiyun .read = seq_read,
5478*4882a593Smuzhiyun .llseek = seq_lseek,
5479*4882a593Smuzhiyun .release = seq_release,
5480*4882a593Smuzhiyun };
5481*4882a593Smuzhiyun
5482*4882a593Smuzhiyun static ssize_t
5483*4882a593Smuzhiyun tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5484*4882a593Smuzhiyun size_t cnt, loff_t *ppos)
5485*4882a593Smuzhiyun {
5486*4882a593Smuzhiyun char buf[64];
5487*4882a593Smuzhiyun int r;
5488*4882a593Smuzhiyun
5489*4882a593Smuzhiyun preempt_disable();
5490*4882a593Smuzhiyun arch_spin_lock(&trace_cmdline_lock);
5491*4882a593Smuzhiyun r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5492*4882a593Smuzhiyun arch_spin_unlock(&trace_cmdline_lock);
5493*4882a593Smuzhiyun preempt_enable();
5494*4882a593Smuzhiyun
5495*4882a593Smuzhiyun return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5496*4882a593Smuzhiyun }
5497*4882a593Smuzhiyun
5498*4882a593Smuzhiyun static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5499*4882a593Smuzhiyun {
5500*4882a593Smuzhiyun kfree(s->saved_cmdlines);
5501*4882a593Smuzhiyun kfree(s->map_cmdline_to_pid);
5502*4882a593Smuzhiyun kfree(s);
5503*4882a593Smuzhiyun }
5504*4882a593Smuzhiyun
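/*
 * Replace the saved-cmdlines buffer with a newly allocated one holding
 * @val entries. The pointer swap is done under trace_cmdline_lock so
 * readers never see a half-initialized buffer; the old buffer is freed
 * only after the lock is dropped.
 */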
5505*4882a593Smuzhiyun static int tracing_resize_saved_cmdlines(unsigned int val)
5506*4882a593Smuzhiyun {
5507*4882a593Smuzhiyun struct saved_cmdlines_buffer *s, *savedcmd_temp;
5508*4882a593Smuzhiyun
5509*4882a593Smuzhiyun s = kmalloc(sizeof(*s), GFP_KERNEL);
5510*4882a593Smuzhiyun if (!s)
5511*4882a593Smuzhiyun return -ENOMEM;
5512*4882a593Smuzhiyun
5513*4882a593Smuzhiyun if (allocate_cmdlines_buffer(val, s) < 0) {
5514*4882a593Smuzhiyun kfree(s);
5515*4882a593Smuzhiyun return -ENOMEM;
5516*4882a593Smuzhiyun }
5517*4882a593Smuzhiyun
5518*4882a593Smuzhiyun preempt_disable();
5519*4882a593Smuzhiyun arch_spin_lock(&trace_cmdline_lock);
5520*4882a593Smuzhiyun savedcmd_temp = savedcmd;
5521*4882a593Smuzhiyun savedcmd = s;
5522*4882a593Smuzhiyun arch_spin_unlock(&trace_cmdline_lock);
5523*4882a593Smuzhiyun preempt_enable();
5524*4882a593Smuzhiyun free_saved_cmdlines_buffer(savedcmd_temp);
5525*4882a593Smuzhiyun
5526*4882a593Smuzhiyun return 0;
5527*4882a593Smuzhiyun }
5528*4882a593Smuzhiyun
5529*4882a593Smuzhiyun static ssize_t
5530*4882a593Smuzhiyun tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5531*4882a593Smuzhiyun size_t cnt, loff_t *ppos)
5532*4882a593Smuzhiyun {
5533*4882a593Smuzhiyun unsigned long val;
5534*4882a593Smuzhiyun int ret;
5535*4882a593Smuzhiyun
5536*4882a593Smuzhiyun ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5537*4882a593Smuzhiyun if (ret)
5538*4882a593Smuzhiyun return ret;
5539*4882a593Smuzhiyun
5540*4882a593Smuzhiyun /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5541*4882a593Smuzhiyun if (!val || val > PID_MAX_DEFAULT)
5542*4882a593Smuzhiyun return -EINVAL;
5543*4882a593Smuzhiyun
5544*4882a593Smuzhiyun ret = tracing_resize_saved_cmdlines((unsigned int)val);
5545*4882a593Smuzhiyun if (ret < 0)
5546*4882a593Smuzhiyun return ret;
5547*4882a593Smuzhiyun
5548*4882a593Smuzhiyun *ppos += cnt;
5549*4882a593Smuzhiyun
5550*4882a593Smuzhiyun return cnt;
5551*4882a593Smuzhiyun }
5552*4882a593Smuzhiyun
5553*4882a593Smuzhiyun static const struct file_operations tracing_saved_cmdlines_size_fops = {
5554*4882a593Smuzhiyun .open = tracing_open_generic,
5555*4882a593Smuzhiyun .read = tracing_saved_cmdlines_size_read,
5556*4882a593Smuzhiyun .write = tracing_saved_cmdlines_size_write,
5557*4882a593Smuzhiyun };
5558*4882a593Smuzhiyun
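/*
 * When CONFIG_TRACE_EVAL_MAP_FILE is enabled, the eval_map file created
 * below dumps every registered trace_eval_map entry as a
 * "<eval_string> <eval_value> (<system>)" line, walking the chained
 * blocks built by trace_insert_eval_map_file().
 */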
5559*4882a593Smuzhiyun #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5560*4882a593Smuzhiyun static union trace_eval_map_item *
5561*4882a593Smuzhiyun update_eval_map(union trace_eval_map_item *ptr)
5562*4882a593Smuzhiyun {
5563*4882a593Smuzhiyun if (!ptr->map.eval_string) {
5564*4882a593Smuzhiyun if (ptr->tail.next) {
5565*4882a593Smuzhiyun ptr = ptr->tail.next;
5566*4882a593Smuzhiyun /* Set ptr to the next real item (skip head) */
5567*4882a593Smuzhiyun ptr++;
5568*4882a593Smuzhiyun } else
5569*4882a593Smuzhiyun return NULL;
5570*4882a593Smuzhiyun }
5571*4882a593Smuzhiyun return ptr;
5572*4882a593Smuzhiyun }
5573*4882a593Smuzhiyun
5574*4882a593Smuzhiyun static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5575*4882a593Smuzhiyun {
5576*4882a593Smuzhiyun union trace_eval_map_item *ptr = v;
5577*4882a593Smuzhiyun
5578*4882a593Smuzhiyun /*
5579*4882a593Smuzhiyun * Paranoid! If ptr points to end, we don't want to increment past it.
5580*4882a593Smuzhiyun * This really should never happen.
5581*4882a593Smuzhiyun */
5582*4882a593Smuzhiyun (*pos)++;
5583*4882a593Smuzhiyun ptr = update_eval_map(ptr);
5584*4882a593Smuzhiyun if (WARN_ON_ONCE(!ptr))
5585*4882a593Smuzhiyun return NULL;
5586*4882a593Smuzhiyun
5587*4882a593Smuzhiyun ptr++;
5588*4882a593Smuzhiyun ptr = update_eval_map(ptr);
5589*4882a593Smuzhiyun
5590*4882a593Smuzhiyun return ptr;
5591*4882a593Smuzhiyun }
5592*4882a593Smuzhiyun
5593*4882a593Smuzhiyun static void *eval_map_start(struct seq_file *m, loff_t *pos)
5594*4882a593Smuzhiyun {
5595*4882a593Smuzhiyun union trace_eval_map_item *v;
5596*4882a593Smuzhiyun loff_t l = 0;
5597*4882a593Smuzhiyun
5598*4882a593Smuzhiyun mutex_lock(&trace_eval_mutex);
5599*4882a593Smuzhiyun
5600*4882a593Smuzhiyun v = trace_eval_maps;
5601*4882a593Smuzhiyun if (v)
5602*4882a593Smuzhiyun v++;
5603*4882a593Smuzhiyun
5604*4882a593Smuzhiyun while (v && l < *pos) {
5605*4882a593Smuzhiyun v = eval_map_next(m, v, &l);
5606*4882a593Smuzhiyun }
5607*4882a593Smuzhiyun
5608*4882a593Smuzhiyun return v;
5609*4882a593Smuzhiyun }
5610*4882a593Smuzhiyun
5611*4882a593Smuzhiyun static void eval_map_stop(struct seq_file *m, void *v)
5612*4882a593Smuzhiyun {
5613*4882a593Smuzhiyun mutex_unlock(&trace_eval_mutex);
5614*4882a593Smuzhiyun }
5615*4882a593Smuzhiyun
5616*4882a593Smuzhiyun static int eval_map_show(struct seq_file *m, void *v)
5617*4882a593Smuzhiyun {
5618*4882a593Smuzhiyun union trace_eval_map_item *ptr = v;
5619*4882a593Smuzhiyun
5620*4882a593Smuzhiyun seq_printf(m, "%s %ld (%s)\n",
5621*4882a593Smuzhiyun ptr->map.eval_string, ptr->map.eval_value,
5622*4882a593Smuzhiyun ptr->map.system);
5623*4882a593Smuzhiyun
5624*4882a593Smuzhiyun return 0;
5625*4882a593Smuzhiyun }
5626*4882a593Smuzhiyun
5627*4882a593Smuzhiyun static const struct seq_operations tracing_eval_map_seq_ops = {
5628*4882a593Smuzhiyun .start = eval_map_start,
5629*4882a593Smuzhiyun .next = eval_map_next,
5630*4882a593Smuzhiyun .stop = eval_map_stop,
5631*4882a593Smuzhiyun .show = eval_map_show,
5632*4882a593Smuzhiyun };
5633*4882a593Smuzhiyun
5634*4882a593Smuzhiyun static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5635*4882a593Smuzhiyun {
5636*4882a593Smuzhiyun int ret;
5637*4882a593Smuzhiyun
5638*4882a593Smuzhiyun ret = tracing_check_open_get_tr(NULL);
5639*4882a593Smuzhiyun if (ret)
5640*4882a593Smuzhiyun return ret;
5641*4882a593Smuzhiyun
5642*4882a593Smuzhiyun return seq_open(filp, &tracing_eval_map_seq_ops);
5643*4882a593Smuzhiyun }
5644*4882a593Smuzhiyun
5645*4882a593Smuzhiyun static const struct file_operations tracing_eval_map_fops = {
5646*4882a593Smuzhiyun .open = tracing_eval_map_open,
5647*4882a593Smuzhiyun .read = seq_read,
5648*4882a593Smuzhiyun .llseek = seq_lseek,
5649*4882a593Smuzhiyun .release = seq_release,
5650*4882a593Smuzhiyun };
5651*4882a593Smuzhiyun
5652*4882a593Smuzhiyun static inline union trace_eval_map_item *
5653*4882a593Smuzhiyun trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5654*4882a593Smuzhiyun {
5655*4882a593Smuzhiyun /* Return tail of array given the head */
5656*4882a593Smuzhiyun return ptr + ptr->head.length + 1;
5657*4882a593Smuzhiyun }
5658*4882a593Smuzhiyun
5659*4882a593Smuzhiyun static void
5660*4882a593Smuzhiyun trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5661*4882a593Smuzhiyun int len)
5662*4882a593Smuzhiyun {
5663*4882a593Smuzhiyun struct trace_eval_map **stop;
5664*4882a593Smuzhiyun struct trace_eval_map **map;
5665*4882a593Smuzhiyun union trace_eval_map_item *map_array;
5666*4882a593Smuzhiyun union trace_eval_map_item *ptr;
5667*4882a593Smuzhiyun
5668*4882a593Smuzhiyun stop = start + len;
5669*4882a593Smuzhiyun
5670*4882a593Smuzhiyun /*
5671*4882a593Smuzhiyun * The trace_eval_maps contains the map plus a head and tail item,
5672*4882a593Smuzhiyun * where the head holds the module and length of array, and the
5673*4882a593Smuzhiyun * tail holds a pointer to the next list.
5674*4882a593Smuzhiyun */
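/*
 * Illustrative layout of one block holding N maps:
 *
 *   map_array[0]      head (mod, length = N)
 *   map_array[1..N]   copies of the N trace_eval_map entries
 *   map_array[N + 1]  tail (zeroed; tail.next chains the next block)
 *
 * which is why trace_eval_jmp_to_tail() returns ptr + length + 1.
 */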
5675*4882a593Smuzhiyun map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5676*4882a593Smuzhiyun if (!map_array) {
5677*4882a593Smuzhiyun pr_warn("Unable to allocate trace eval mapping\n");
5678*4882a593Smuzhiyun return;
5679*4882a593Smuzhiyun }
5680*4882a593Smuzhiyun
5681*4882a593Smuzhiyun mutex_lock(&trace_eval_mutex);
5682*4882a593Smuzhiyun
5683*4882a593Smuzhiyun if (!trace_eval_maps)
5684*4882a593Smuzhiyun trace_eval_maps = map_array;
5685*4882a593Smuzhiyun else {
5686*4882a593Smuzhiyun ptr = trace_eval_maps;
5687*4882a593Smuzhiyun for (;;) {
5688*4882a593Smuzhiyun ptr = trace_eval_jmp_to_tail(ptr);
5689*4882a593Smuzhiyun if (!ptr->tail.next)
5690*4882a593Smuzhiyun break;
5691*4882a593Smuzhiyun ptr = ptr->tail.next;
5692*4882a593Smuzhiyun
5693*4882a593Smuzhiyun }
5694*4882a593Smuzhiyun ptr->tail.next = map_array;
5695*4882a593Smuzhiyun }
5696*4882a593Smuzhiyun map_array->head.mod = mod;
5697*4882a593Smuzhiyun map_array->head.length = len;
5698*4882a593Smuzhiyun map_array++;
5699*4882a593Smuzhiyun
5700*4882a593Smuzhiyun for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5701*4882a593Smuzhiyun map_array->map = **map;
5702*4882a593Smuzhiyun map_array++;
5703*4882a593Smuzhiyun }
5704*4882a593Smuzhiyun memset(map_array, 0, sizeof(*map_array));
5705*4882a593Smuzhiyun
5706*4882a593Smuzhiyun mutex_unlock(&trace_eval_mutex);
5707*4882a593Smuzhiyun }
5708*4882a593Smuzhiyun
5709*4882a593Smuzhiyun static void trace_create_eval_file(struct dentry *d_tracer)
5710*4882a593Smuzhiyun {
5711*4882a593Smuzhiyun trace_create_file("eval_map", 0444, d_tracer,
5712*4882a593Smuzhiyun NULL, &tracing_eval_map_fops);
5713*4882a593Smuzhiyun }
5714*4882a593Smuzhiyun
5715*4882a593Smuzhiyun #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5716*4882a593Smuzhiyun static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5717*4882a593Smuzhiyun static inline void trace_insert_eval_map_file(struct module *mod,
5718*4882a593Smuzhiyun struct trace_eval_map **start, int len) { }
5719*4882a593Smuzhiyun #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5720*4882a593Smuzhiyun
5721*4882a593Smuzhiyun static void trace_insert_eval_map(struct module *mod,
5722*4882a593Smuzhiyun struct trace_eval_map **start, int len)
5723*4882a593Smuzhiyun {
5724*4882a593Smuzhiyun struct trace_eval_map **map;
5725*4882a593Smuzhiyun
5726*4882a593Smuzhiyun if (len <= 0)
5727*4882a593Smuzhiyun return;
5728*4882a593Smuzhiyun
5729*4882a593Smuzhiyun map = start;
5730*4882a593Smuzhiyun
5731*4882a593Smuzhiyun trace_event_eval_update(map, len);
5732*4882a593Smuzhiyun
5733*4882a593Smuzhiyun trace_insert_eval_map_file(mod, start, len);
5734*4882a593Smuzhiyun }

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's per-CPU buffers to the entry counts recorded in @size_buf */
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->array_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->array_buffer,
						     &tr->array_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with a max buffer whose size
			 * no longer matches the main buffer.
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed
			 * to update the size of the max buffer. And when we
			 * tried to reset the main buffer to the original
			 * size, we failed there too. This is very unlikely
			 * to happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->array_buffer, size);
	else
		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;

	return ret;
}

ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
				   unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
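/*
 * Both resize helpers take @size in bytes (tracing_entries_write() below
 * converts the KB count written by userspace before getting here), and
 * @cpu_id is either a single CPU or RING_BUFFER_ALL_CPUS to act on every
 * per-CPU buffer. A negative return is an errno: -EINVAL when the CPU is
 * not in the tracing cpumask, -ENOMEM when the resize itself failed.
 */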


/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers are set to a minimum size. Once a
 * user starts to use the tracing facility, the buffers need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static bool tracer_options_updated;

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	/* Only create trace option files after update_tracer_options has finished */
	if (!tracer_options_updated)
		return;

	create_trace_option_files(tr, t);
}

int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

#ifdef CONFIG_TRACER_SNAPSHOT
	if (t->use_max_tr) {
		local_irq_disable();
		arch_spin_lock(&tr->max_lock);
		if (tr->cond_snapshot)
			ret = -EBUSY;
		arch_spin_unlock(&tr->max_lock);
		local_irq_enable();
		if (ret)
			goto out;
	}
#endif
	/* Some tracers won't work on the kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->trace_ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->current_trace->use_max_tr;

	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled
		 * so a synchronize_rcu() is sufficient.
		 */
		synchronize_rcu();
		free_snapshot(tr);
	}

	if (t->use_max_tr && !tr->allocated_snapshot) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#else
	tr->current_trace = &nop_trace;
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}
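/*
 * These handlers typically back the "current_tracer" file. Assuming
 * tracefs is mounted at /sys/kernel/tracing, switching tracers looks like:
 *
 *   # echo function_graph > /sys/kernel/tracing/current_tracer
 *   # cat /sys/kernel/tracing/current_tracer
 *   function_graph
 *
 * The write path strips the trailing newline that echo adds and hands
 * the name to tracing_set_tracer(); the read path reports the tracer
 * that is currently active.
 */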

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
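/*
 * The tracing_nsecs_*() helpers expose nanosecond values in microseconds:
 * reads convert with nsecs_to_usecs(), and a write of "100" stores
 * 100 * 1000 = 100000 ns. Both tracing_thresh and the latency tracers'
 * max-latency file below are built on top of them.
 */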

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->array_buffer = &tr->array_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->trace_ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}
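/*
 * Every open of a trace_pipe file gets its own iterator and increments
 * tr->trace_ref; while that count is non-zero, tracing_set_tracer() above
 * refuses to switch tracers (-EBUSY). tracing_release_pipe() drops the
 * reference again.
 */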

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->trace_ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return EPOLLIN | EPOLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return EPOLLIN | EPOLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
					     filp, poll_table, iter->tr->buffer_percent);
}

static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
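/*
 * Returns 1 when the caller should attempt a read (there may be data, or
 * tracing was turned off after something was already read, which the
 * caller then reports as EOF), -EAGAIN for a non-blocking reader with
 * nothing to consume, or a negative error propagated from wait_on_pipe().
 */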
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We only stop blocking (and return EOF) once we have read
		 * something and tracing has since been disabled. If tracing
		 * is disabled but we have never read anything, we keep
		 * blocking: this allows a user to cat this file first and
		 * enable tracing afterwards. After we have read something,
		 * we give an EOF when tracing is disabled again.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, 0);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	trace_seq_init(&iter->seq);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should have left via the partial-line check
		 * above. One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
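/*
 * This is the read() side of the trace_pipe file: it consumes events as
 * it formats them, so something like
 *
 *   # cat /sys/kernel/tracing/trace_pipe
 *
 * streams new entries and removes them from the ring buffer as they are
 * printed (hence the "Consumer reader" note above).
 */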

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &default_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are the same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from the first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
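/*
 * These implement the buffer_size_kb files (the top-level one and the
 * per_cpu/cpuN/ instances). For example,
 *
 *   # echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * grows every per-CPU buffer to 4096 KB, while writing to a per-CPU file
 * resizes only that CPU; after that the top-level read shows "X" because
 * the per-CPU sizes no longer match.
 */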

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this function
	 * is just to make sure that there is no error when "echo" is used.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
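/*
 * Together these back the free_buffer file: writes are accepted but
 * ignored, and it is the final close that shrinks the ring buffer to
 * zero (after turning tracing off first when the stop-on-free option
 * is set).
 */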

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	enum event_trigger_type tt = ETT_NONE;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

	/* Used in tracing_mark_raw_write() as well */
#define FAULTED_STR "<faulted>"
#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;

	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
		/* do not add \n before testing triggers, but add \0 */
		entry->buf[cnt] = '\0';
		tt = event_triggers_call(tr->trace_marker_file, entry, event);
	}

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	if (static_branch_unlikely(&trace_marker_exports_enabled))
		ftrace_exports(event, TRACE_EXPORT_MARKER);
	__buffer_unlock_commit(buffer, event);

	if (tt)
		event_triggers_post_call(tr->trace_marker_file, tt);

	if (written > 0)
		*fpos += written;

	return written;
}
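/*
 * This is the write() handler for the trace_marker file, which lets
 * userspace inject a text annotation into the trace stream, e.g.
 *
 *   # echo "frame 42 rendered" > /sys/kernel/tracing/trace_marker
 *
 * The note is recorded as a TRACE_PRINT event, and any triggers attached
 * to the trace_marker event are fired as part of the write.
 */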

/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct raw_data_entry *entry;
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}
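/*
 * The raw marker (trace_marker_raw) takes a binary payload of between
 * sizeof(int) and RAW_DATA_MAX_SIZE bytes, where the first sizeof(int)
 * bytes are a user-chosen tag id and the rest is opaque data. It is
 * stored as a TRACE_RAW_DATA entry, with the id forced to -1 if the
 * copy from userspace faults.
 */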

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			   "%s%s%s%s", i ? " " : "",
			   i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			   i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
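/*
 * The trace clock is selected through the trace_clock file. Reading it
 * lists the available clocks with the active one in brackets (the output
 * of tracing_clock_show() above), and for example
 *
 *   # echo mono > /sys/kernel/tracing/trace_clock
 *
 * switches to the monotonic clock and resets the buffers, since
 * timestamps taken with different clocks cannot be compared.
 */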
6873*4882a593Smuzhiyun
tracing_clock_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * fpos)6874*4882a593Smuzhiyun static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6875*4882a593Smuzhiyun size_t cnt, loff_t *fpos)
6876*4882a593Smuzhiyun {
6877*4882a593Smuzhiyun struct seq_file *m = filp->private_data;
6878*4882a593Smuzhiyun struct trace_array *tr = m->private;
6879*4882a593Smuzhiyun char buf[64];
6880*4882a593Smuzhiyun const char *clockstr;
6881*4882a593Smuzhiyun int ret;
6882*4882a593Smuzhiyun
6883*4882a593Smuzhiyun if (cnt >= sizeof(buf))
6884*4882a593Smuzhiyun return -EINVAL;
6885*4882a593Smuzhiyun
6886*4882a593Smuzhiyun if (copy_from_user(buf, ubuf, cnt))
6887*4882a593Smuzhiyun return -EFAULT;
6888*4882a593Smuzhiyun
6889*4882a593Smuzhiyun buf[cnt] = 0;
6890*4882a593Smuzhiyun
6891*4882a593Smuzhiyun clockstr = strstrip(buf);
6892*4882a593Smuzhiyun
6893*4882a593Smuzhiyun ret = tracing_set_clock(tr, clockstr);
6894*4882a593Smuzhiyun if (ret)
6895*4882a593Smuzhiyun return ret;
6896*4882a593Smuzhiyun
6897*4882a593Smuzhiyun *fpos += cnt;
6898*4882a593Smuzhiyun
6899*4882a593Smuzhiyun return cnt;
6900*4882a593Smuzhiyun }
6901*4882a593Smuzhiyun
tracing_clock_open(struct inode * inode,struct file * file)6902*4882a593Smuzhiyun static int tracing_clock_open(struct inode *inode, struct file *file)
6903*4882a593Smuzhiyun {
6904*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
6905*4882a593Smuzhiyun int ret;
6906*4882a593Smuzhiyun
6907*4882a593Smuzhiyun ret = tracing_check_open_get_tr(tr);
6908*4882a593Smuzhiyun if (ret)
6909*4882a593Smuzhiyun return ret;
6910*4882a593Smuzhiyun
6911*4882a593Smuzhiyun ret = single_open(file, tracing_clock_show, inode->i_private);
6912*4882a593Smuzhiyun if (ret < 0)
6913*4882a593Smuzhiyun trace_array_put(tr);
6914*4882a593Smuzhiyun
6915*4882a593Smuzhiyun return ret;
6916*4882a593Smuzhiyun }
6917*4882a593Smuzhiyun
tracing_time_stamp_mode_show(struct seq_file * m,void * v)6918*4882a593Smuzhiyun static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6919*4882a593Smuzhiyun {
6920*4882a593Smuzhiyun struct trace_array *tr = m->private;
6921*4882a593Smuzhiyun
6922*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
6923*4882a593Smuzhiyun
6924*4882a593Smuzhiyun if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
6925*4882a593Smuzhiyun seq_puts(m, "delta [absolute]\n");
6926*4882a593Smuzhiyun else
6927*4882a593Smuzhiyun seq_puts(m, "[delta] absolute\n");
6928*4882a593Smuzhiyun
6929*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
6930*4882a593Smuzhiyun
6931*4882a593Smuzhiyun return 0;
6932*4882a593Smuzhiyun }
6933*4882a593Smuzhiyun
tracing_time_stamp_mode_open(struct inode * inode,struct file * file)6934*4882a593Smuzhiyun static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6935*4882a593Smuzhiyun {
6936*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
6937*4882a593Smuzhiyun int ret;
6938*4882a593Smuzhiyun
6939*4882a593Smuzhiyun ret = tracing_check_open_get_tr(tr);
6940*4882a593Smuzhiyun if (ret)
6941*4882a593Smuzhiyun return ret;
6942*4882a593Smuzhiyun
6943*4882a593Smuzhiyun ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6944*4882a593Smuzhiyun if (ret < 0)
6945*4882a593Smuzhiyun trace_array_put(tr);
6946*4882a593Smuzhiyun
6947*4882a593Smuzhiyun return ret;
6948*4882a593Smuzhiyun }
6949*4882a593Smuzhiyun
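/*
 * Requests for absolute timestamps are reference counted: the ring
 * buffer is switched to absolute mode on the first request and back to
 * delta mode only when the last user drops its reference.
 */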
6950*4882a593Smuzhiyun int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6951*4882a593Smuzhiyun {
6952*4882a593Smuzhiyun int ret = 0;
6953*4882a593Smuzhiyun
6954*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
6955*4882a593Smuzhiyun
6956*4882a593Smuzhiyun if (abs && tr->time_stamp_abs_ref++)
6957*4882a593Smuzhiyun goto out;
6958*4882a593Smuzhiyun
6959*4882a593Smuzhiyun if (!abs) {
6960*4882a593Smuzhiyun if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6961*4882a593Smuzhiyun ret = -EINVAL;
6962*4882a593Smuzhiyun goto out;
6963*4882a593Smuzhiyun }
6964*4882a593Smuzhiyun
6965*4882a593Smuzhiyun if (--tr->time_stamp_abs_ref)
6966*4882a593Smuzhiyun goto out;
6967*4882a593Smuzhiyun }
6968*4882a593Smuzhiyun
6969*4882a593Smuzhiyun ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
6970*4882a593Smuzhiyun
6971*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
6972*4882a593Smuzhiyun if (tr->max_buffer.buffer)
6973*4882a593Smuzhiyun ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6974*4882a593Smuzhiyun #endif
6975*4882a593Smuzhiyun out:
6976*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
6977*4882a593Smuzhiyun
6978*4882a593Smuzhiyun return ret;
6979*4882a593Smuzhiyun }
6980*4882a593Smuzhiyun
6981*4882a593Smuzhiyun struct ftrace_buffer_info {
6982*4882a593Smuzhiyun struct trace_iterator iter;
6983*4882a593Smuzhiyun void *spare;
6984*4882a593Smuzhiyun unsigned int spare_cpu;
6985*4882a593Smuzhiyun unsigned int read;
6986*4882a593Smuzhiyun };
6987*4882a593Smuzhiyun
6988*4882a593Smuzhiyun #ifdef CONFIG_TRACER_SNAPSHOT
6989*4882a593Smuzhiyun static int tracing_snapshot_open(struct inode *inode, struct file *file)
6990*4882a593Smuzhiyun {
6991*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
6992*4882a593Smuzhiyun struct trace_iterator *iter;
6993*4882a593Smuzhiyun struct seq_file *m;
6994*4882a593Smuzhiyun int ret;
6995*4882a593Smuzhiyun
6996*4882a593Smuzhiyun ret = tracing_check_open_get_tr(tr);
6997*4882a593Smuzhiyun if (ret)
6998*4882a593Smuzhiyun return ret;
6999*4882a593Smuzhiyun
7000*4882a593Smuzhiyun if (file->f_mode & FMODE_READ) {
7001*4882a593Smuzhiyun iter = __tracing_open(inode, file, true);
7002*4882a593Smuzhiyun if (IS_ERR(iter))
7003*4882a593Smuzhiyun ret = PTR_ERR(iter);
7004*4882a593Smuzhiyun } else {
7005*4882a593Smuzhiyun /* Writes still need the seq_file to hold the private data */
7006*4882a593Smuzhiyun ret = -ENOMEM;
7007*4882a593Smuzhiyun m = kzalloc(sizeof(*m), GFP_KERNEL);
7008*4882a593Smuzhiyun if (!m)
7009*4882a593Smuzhiyun goto out;
7010*4882a593Smuzhiyun iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7011*4882a593Smuzhiyun if (!iter) {
7012*4882a593Smuzhiyun kfree(m);
7013*4882a593Smuzhiyun goto out;
7014*4882a593Smuzhiyun }
7015*4882a593Smuzhiyun ret = 0;
7016*4882a593Smuzhiyun
7017*4882a593Smuzhiyun iter->tr = tr;
7018*4882a593Smuzhiyun iter->array_buffer = &tr->max_buffer;
7019*4882a593Smuzhiyun iter->cpu_file = tracing_get_cpu(inode);
7020*4882a593Smuzhiyun m->private = iter;
7021*4882a593Smuzhiyun file->private_data = m;
7022*4882a593Smuzhiyun }
7023*4882a593Smuzhiyun out:
7024*4882a593Smuzhiyun if (ret < 0)
7025*4882a593Smuzhiyun trace_array_put(tr);
7026*4882a593Smuzhiyun
7027*4882a593Smuzhiyun return ret;
7028*4882a593Smuzhiyun }
7029*4882a593Smuzhiyun
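/*
 * Writes to the snapshot file: "0" frees the snapshot buffer, "1" takes
 * a snapshot (allocating the buffer if necessary), and any other value
 * clears the snapshot buffer contents (see the switch below).
 */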
7030*4882a593Smuzhiyun static ssize_t
7031*4882a593Smuzhiyun tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7032*4882a593Smuzhiyun loff_t *ppos)
7033*4882a593Smuzhiyun {
7034*4882a593Smuzhiyun struct seq_file *m = filp->private_data;
7035*4882a593Smuzhiyun struct trace_iterator *iter = m->private;
7036*4882a593Smuzhiyun struct trace_array *tr = iter->tr;
7037*4882a593Smuzhiyun unsigned long val;
7038*4882a593Smuzhiyun int ret;
7039*4882a593Smuzhiyun
7040*4882a593Smuzhiyun ret = tracing_update_buffers();
7041*4882a593Smuzhiyun if (ret < 0)
7042*4882a593Smuzhiyun return ret;
7043*4882a593Smuzhiyun
7044*4882a593Smuzhiyun ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7045*4882a593Smuzhiyun if (ret)
7046*4882a593Smuzhiyun return ret;
7047*4882a593Smuzhiyun
7048*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
7049*4882a593Smuzhiyun
7050*4882a593Smuzhiyun if (tr->current_trace->use_max_tr) {
7051*4882a593Smuzhiyun ret = -EBUSY;
7052*4882a593Smuzhiyun goto out;
7053*4882a593Smuzhiyun }
7054*4882a593Smuzhiyun
7055*4882a593Smuzhiyun local_irq_disable();
7056*4882a593Smuzhiyun arch_spin_lock(&tr->max_lock);
7057*4882a593Smuzhiyun if (tr->cond_snapshot)
7058*4882a593Smuzhiyun ret = -EBUSY;
7059*4882a593Smuzhiyun arch_spin_unlock(&tr->max_lock);
7060*4882a593Smuzhiyun local_irq_enable();
7061*4882a593Smuzhiyun if (ret)
7062*4882a593Smuzhiyun goto out;
7063*4882a593Smuzhiyun
7064*4882a593Smuzhiyun switch (val) {
7065*4882a593Smuzhiyun case 0:
7066*4882a593Smuzhiyun if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7067*4882a593Smuzhiyun ret = -EINVAL;
7068*4882a593Smuzhiyun break;
7069*4882a593Smuzhiyun }
7070*4882a593Smuzhiyun if (tr->allocated_snapshot)
7071*4882a593Smuzhiyun free_snapshot(tr);
7072*4882a593Smuzhiyun break;
7073*4882a593Smuzhiyun case 1:
7074*4882a593Smuzhiyun /* Only allow per-cpu swap if the ring buffer supports it */
7075*4882a593Smuzhiyun #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7076*4882a593Smuzhiyun if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7077*4882a593Smuzhiyun ret = -EINVAL;
7078*4882a593Smuzhiyun break;
7079*4882a593Smuzhiyun }
7080*4882a593Smuzhiyun #endif
7081*4882a593Smuzhiyun if (tr->allocated_snapshot)
7082*4882a593Smuzhiyun ret = resize_buffer_duplicate_size(&tr->max_buffer,
7083*4882a593Smuzhiyun &tr->array_buffer, iter->cpu_file);
7084*4882a593Smuzhiyun else
7085*4882a593Smuzhiyun ret = tracing_alloc_snapshot_instance(tr);
7086*4882a593Smuzhiyun if (ret < 0)
7087*4882a593Smuzhiyun break;
7088*4882a593Smuzhiyun local_irq_disable();
7089*4882a593Smuzhiyun /* Now, we're going to swap */
7090*4882a593Smuzhiyun if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7091*4882a593Smuzhiyun update_max_tr(tr, current, smp_processor_id(), NULL);
7092*4882a593Smuzhiyun else
7093*4882a593Smuzhiyun update_max_tr_single(tr, current, iter->cpu_file);
7094*4882a593Smuzhiyun local_irq_enable();
7095*4882a593Smuzhiyun break;
7096*4882a593Smuzhiyun default:
7097*4882a593Smuzhiyun if (tr->allocated_snapshot) {
7098*4882a593Smuzhiyun if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7099*4882a593Smuzhiyun tracing_reset_online_cpus(&tr->max_buffer);
7100*4882a593Smuzhiyun else
7101*4882a593Smuzhiyun tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7102*4882a593Smuzhiyun }
7103*4882a593Smuzhiyun break;
7104*4882a593Smuzhiyun }
7105*4882a593Smuzhiyun
7106*4882a593Smuzhiyun if (ret >= 0) {
7107*4882a593Smuzhiyun *ppos += cnt;
7108*4882a593Smuzhiyun ret = cnt;
7109*4882a593Smuzhiyun }
7110*4882a593Smuzhiyun out:
7111*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
7112*4882a593Smuzhiyun return ret;
7113*4882a593Smuzhiyun }
7114*4882a593Smuzhiyun
7115*4882a593Smuzhiyun static int tracing_snapshot_release(struct inode *inode, struct file *file)
7116*4882a593Smuzhiyun {
7117*4882a593Smuzhiyun struct seq_file *m = file->private_data;
7118*4882a593Smuzhiyun int ret;
7119*4882a593Smuzhiyun
7120*4882a593Smuzhiyun ret = tracing_release(inode, file);
7121*4882a593Smuzhiyun
7122*4882a593Smuzhiyun if (file->f_mode & FMODE_READ)
7123*4882a593Smuzhiyun return ret;
7124*4882a593Smuzhiyun
7125*4882a593Smuzhiyun /* If write only, the seq_file is just a stub */
7126*4882a593Smuzhiyun if (m)
7127*4882a593Smuzhiyun kfree(m->private);
7128*4882a593Smuzhiyun kfree(m);
7129*4882a593Smuzhiyun
7130*4882a593Smuzhiyun return 0;
7131*4882a593Smuzhiyun }
7132*4882a593Smuzhiyun
7133*4882a593Smuzhiyun static int tracing_buffers_open(struct inode *inode, struct file *filp);
7134*4882a593Smuzhiyun static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7135*4882a593Smuzhiyun size_t count, loff_t *ppos);
7136*4882a593Smuzhiyun static int tracing_buffers_release(struct inode *inode, struct file *file);
7137*4882a593Smuzhiyun static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7138*4882a593Smuzhiyun struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7139*4882a593Smuzhiyun
7140*4882a593Smuzhiyun static int snapshot_raw_open(struct inode *inode, struct file *filp)
7141*4882a593Smuzhiyun {
7142*4882a593Smuzhiyun struct ftrace_buffer_info *info;
7143*4882a593Smuzhiyun int ret;
7144*4882a593Smuzhiyun
7145*4882a593Smuzhiyun /* The following checks for tracefs lockdown */
7146*4882a593Smuzhiyun ret = tracing_buffers_open(inode, filp);
7147*4882a593Smuzhiyun if (ret < 0)
7148*4882a593Smuzhiyun return ret;
7149*4882a593Smuzhiyun
7150*4882a593Smuzhiyun info = filp->private_data;
7151*4882a593Smuzhiyun
7152*4882a593Smuzhiyun if (info->iter.trace->use_max_tr) {
7153*4882a593Smuzhiyun tracing_buffers_release(inode, filp);
7154*4882a593Smuzhiyun return -EBUSY;
7155*4882a593Smuzhiyun }
7156*4882a593Smuzhiyun
7157*4882a593Smuzhiyun info->iter.snapshot = true;
7158*4882a593Smuzhiyun info->iter.array_buffer = &info->iter.tr->max_buffer;
7159*4882a593Smuzhiyun
7160*4882a593Smuzhiyun return ret;
7161*4882a593Smuzhiyun }
7162*4882a593Smuzhiyun
7163*4882a593Smuzhiyun #endif /* CONFIG_TRACER_SNAPSHOT */
7164*4882a593Smuzhiyun
7165*4882a593Smuzhiyun
7166*4882a593Smuzhiyun static const struct file_operations tracing_thresh_fops = {
7167*4882a593Smuzhiyun .open = tracing_open_generic,
7168*4882a593Smuzhiyun .read = tracing_thresh_read,
7169*4882a593Smuzhiyun .write = tracing_thresh_write,
7170*4882a593Smuzhiyun .llseek = generic_file_llseek,
7171*4882a593Smuzhiyun };
7172*4882a593Smuzhiyun
7173*4882a593Smuzhiyun #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7174*4882a593Smuzhiyun static const struct file_operations tracing_max_lat_fops = {
7175*4882a593Smuzhiyun .open = tracing_open_generic,
7176*4882a593Smuzhiyun .read = tracing_max_lat_read,
7177*4882a593Smuzhiyun .write = tracing_max_lat_write,
7178*4882a593Smuzhiyun .llseek = generic_file_llseek,
7179*4882a593Smuzhiyun };
7180*4882a593Smuzhiyun #endif
7181*4882a593Smuzhiyun
7182*4882a593Smuzhiyun static const struct file_operations set_tracer_fops = {
7183*4882a593Smuzhiyun .open = tracing_open_generic,
7184*4882a593Smuzhiyun .read = tracing_set_trace_read,
7185*4882a593Smuzhiyun .write = tracing_set_trace_write,
7186*4882a593Smuzhiyun .llseek = generic_file_llseek,
7187*4882a593Smuzhiyun };
7188*4882a593Smuzhiyun
7189*4882a593Smuzhiyun static const struct file_operations tracing_pipe_fops = {
7190*4882a593Smuzhiyun .open = tracing_open_pipe,
7191*4882a593Smuzhiyun .poll = tracing_poll_pipe,
7192*4882a593Smuzhiyun .read = tracing_read_pipe,
7193*4882a593Smuzhiyun .splice_read = tracing_splice_read_pipe,
7194*4882a593Smuzhiyun .release = tracing_release_pipe,
7195*4882a593Smuzhiyun .llseek = no_llseek,
7196*4882a593Smuzhiyun };
7197*4882a593Smuzhiyun
7198*4882a593Smuzhiyun static const struct file_operations tracing_entries_fops = {
7199*4882a593Smuzhiyun .open = tracing_open_generic_tr,
7200*4882a593Smuzhiyun .read = tracing_entries_read,
7201*4882a593Smuzhiyun .write = tracing_entries_write,
7202*4882a593Smuzhiyun .llseek = generic_file_llseek,
7203*4882a593Smuzhiyun .release = tracing_release_generic_tr,
7204*4882a593Smuzhiyun };
7205*4882a593Smuzhiyun
7206*4882a593Smuzhiyun static const struct file_operations tracing_total_entries_fops = {
7207*4882a593Smuzhiyun .open = tracing_open_generic_tr,
7208*4882a593Smuzhiyun .read = tracing_total_entries_read,
7209*4882a593Smuzhiyun .llseek = generic_file_llseek,
7210*4882a593Smuzhiyun .release = tracing_release_generic_tr,
7211*4882a593Smuzhiyun };
7212*4882a593Smuzhiyun
7213*4882a593Smuzhiyun static const struct file_operations tracing_free_buffer_fops = {
7214*4882a593Smuzhiyun .open = tracing_open_generic_tr,
7215*4882a593Smuzhiyun .write = tracing_free_buffer_write,
7216*4882a593Smuzhiyun .release = tracing_free_buffer_release,
7217*4882a593Smuzhiyun };
7218*4882a593Smuzhiyun
7219*4882a593Smuzhiyun static const struct file_operations tracing_mark_fops = {
7220*4882a593Smuzhiyun .open = tracing_open_generic_tr,
7221*4882a593Smuzhiyun .write = tracing_mark_write,
7222*4882a593Smuzhiyun .llseek = generic_file_llseek,
7223*4882a593Smuzhiyun .release = tracing_release_generic_tr,
7224*4882a593Smuzhiyun };
7225*4882a593Smuzhiyun
7226*4882a593Smuzhiyun static const struct file_operations tracing_mark_raw_fops = {
7227*4882a593Smuzhiyun .open = tracing_open_generic_tr,
7228*4882a593Smuzhiyun .write = tracing_mark_raw_write,
7229*4882a593Smuzhiyun .llseek = generic_file_llseek,
7230*4882a593Smuzhiyun .release = tracing_release_generic_tr,
7231*4882a593Smuzhiyun };
7232*4882a593Smuzhiyun
7233*4882a593Smuzhiyun static const struct file_operations trace_clock_fops = {
7234*4882a593Smuzhiyun .open = tracing_clock_open,
7235*4882a593Smuzhiyun .read = seq_read,
7236*4882a593Smuzhiyun .llseek = seq_lseek,
7237*4882a593Smuzhiyun .release = tracing_single_release_tr,
7238*4882a593Smuzhiyun .write = tracing_clock_write,
7239*4882a593Smuzhiyun };
7240*4882a593Smuzhiyun
7241*4882a593Smuzhiyun static const struct file_operations trace_time_stamp_mode_fops = {
7242*4882a593Smuzhiyun .open = tracing_time_stamp_mode_open,
7243*4882a593Smuzhiyun .read = seq_read,
7244*4882a593Smuzhiyun .llseek = seq_lseek,
7245*4882a593Smuzhiyun .release = tracing_single_release_tr,
7246*4882a593Smuzhiyun };
7247*4882a593Smuzhiyun
7248*4882a593Smuzhiyun #ifdef CONFIG_TRACER_SNAPSHOT
7249*4882a593Smuzhiyun static const struct file_operations snapshot_fops = {
7250*4882a593Smuzhiyun .open = tracing_snapshot_open,
7251*4882a593Smuzhiyun .read = seq_read,
7252*4882a593Smuzhiyun .write = tracing_snapshot_write,
7253*4882a593Smuzhiyun .llseek = tracing_lseek,
7254*4882a593Smuzhiyun .release = tracing_snapshot_release,
7255*4882a593Smuzhiyun };
7256*4882a593Smuzhiyun
7257*4882a593Smuzhiyun static const struct file_operations snapshot_raw_fops = {
7258*4882a593Smuzhiyun .open = snapshot_raw_open,
7259*4882a593Smuzhiyun .read = tracing_buffers_read,
7260*4882a593Smuzhiyun .release = tracing_buffers_release,
7261*4882a593Smuzhiyun .splice_read = tracing_buffers_splice_read,
7262*4882a593Smuzhiyun .llseek = no_llseek,
7263*4882a593Smuzhiyun };
7264*4882a593Smuzhiyun
7265*4882a593Smuzhiyun #endif /* CONFIG_TRACER_SNAPSHOT */
7266*4882a593Smuzhiyun
7267*4882a593Smuzhiyun #define TRACING_LOG_ERRS_MAX 8
7268*4882a593Smuzhiyun #define TRACING_LOG_LOC_MAX 128
7269*4882a593Smuzhiyun
7270*4882a593Smuzhiyun #define CMD_PREFIX " Command: "
7271*4882a593Smuzhiyun
7272*4882a593Smuzhiyun struct err_info {
7273*4882a593Smuzhiyun const char **errs; /* ptr to loc-specific array of err strings */
7274*4882a593Smuzhiyun u8 type; /* index into errs -> specific err string */
7275*4882a593Smuzhiyun u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7276*4882a593Smuzhiyun u64 ts;
7277*4882a593Smuzhiyun };
7278*4882a593Smuzhiyun
7279*4882a593Smuzhiyun struct tracing_log_err {
7280*4882a593Smuzhiyun struct list_head list;
7281*4882a593Smuzhiyun struct err_info info;
7282*4882a593Smuzhiyun char loc[TRACING_LOG_LOC_MAX]; /* err location */
7283*4882a593Smuzhiyun char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7284*4882a593Smuzhiyun };
7285*4882a593Smuzhiyun
7286*4882a593Smuzhiyun static DEFINE_MUTEX(tracing_err_log_lock);
7287*4882a593Smuzhiyun
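/*
 * Allocate a new error log entry, or recycle the oldest one once
 * TRACING_LOG_ERRS_MAX entries have been allocated.
 */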
7288*4882a593Smuzhiyun static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7289*4882a593Smuzhiyun {
7290*4882a593Smuzhiyun struct tracing_log_err *err;
7291*4882a593Smuzhiyun
7292*4882a593Smuzhiyun if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7293*4882a593Smuzhiyun err = kzalloc(sizeof(*err), GFP_KERNEL);
7294*4882a593Smuzhiyun if (!err)
7295*4882a593Smuzhiyun err = ERR_PTR(-ENOMEM);
7296*4882a593Smuzhiyun else
7297*4882a593Smuzhiyun tr->n_err_log_entries++;
7298*4882a593Smuzhiyun
7299*4882a593Smuzhiyun return err;
7300*4882a593Smuzhiyun }
7301*4882a593Smuzhiyun
7302*4882a593Smuzhiyun err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7303*4882a593Smuzhiyun list_del(&err->list);
7304*4882a593Smuzhiyun
7305*4882a593Smuzhiyun return err;
7306*4882a593Smuzhiyun }
7307*4882a593Smuzhiyun
7308*4882a593Smuzhiyun /**
7309*4882a593Smuzhiyun * err_pos - find the position of a string within a command for error careting
7310*4882a593Smuzhiyun * @cmd: The tracing command that caused the error
7311*4882a593Smuzhiyun * @str: The string to position the caret at within @cmd
7312*4882a593Smuzhiyun *
7313*4882a593Smuzhiyun * Finds the position of the first occurrence of @str within @cmd. The
7314*4882a593Smuzhiyun * return value can be passed to tracing_log_err() for caret placement
7315*4882a593Smuzhiyun * within @cmd.
7316*4882a593Smuzhiyun *
7317*4882a593Smuzhiyun * Returns the index within @cmd of the first occurrence of @str or 0
7318*4882a593Smuzhiyun * if @str was not found.
7319*4882a593Smuzhiyun */
7320*4882a593Smuzhiyun unsigned int err_pos(char *cmd, const char *str)
7321*4882a593Smuzhiyun {
7322*4882a593Smuzhiyun char *found;
7323*4882a593Smuzhiyun
7324*4882a593Smuzhiyun if (WARN_ON(!strlen(cmd)))
7325*4882a593Smuzhiyun return 0;
7326*4882a593Smuzhiyun
7327*4882a593Smuzhiyun found = strstr(cmd, str);
7328*4882a593Smuzhiyun if (found)
7329*4882a593Smuzhiyun return found - cmd;
7330*4882a593Smuzhiyun
7331*4882a593Smuzhiyun return 0;
7332*4882a593Smuzhiyun }
7333*4882a593Smuzhiyun
7334*4882a593Smuzhiyun /**
7335*4882a593Smuzhiyun * tracing_log_err - write an error to the tracing error log
7336*4882a593Smuzhiyun * @tr: The associated trace array for the error (NULL for top level array)
7337*4882a593Smuzhiyun * @loc: A string describing where the error occurred
7338*4882a593Smuzhiyun * @cmd: The tracing command that caused the error
7339*4882a593Smuzhiyun * @errs: The array of loc-specific static error strings
7340*4882a593Smuzhiyun * @type: The index into errs[], which produces the specific static err string
7341*4882a593Smuzhiyun * @pos: The position the caret should be placed in the cmd
7342*4882a593Smuzhiyun *
7343*4882a593Smuzhiyun * Writes an error into tracing/error_log of the form:
7344*4882a593Smuzhiyun *
7345*4882a593Smuzhiyun * <loc>: error: <text>
7346*4882a593Smuzhiyun * Command: <cmd>
7347*4882a593Smuzhiyun * ^
7348*4882a593Smuzhiyun *
7349*4882a593Smuzhiyun * tracing/error_log is a small log file containing the last
7350*4882a593Smuzhiyun * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7351*4882a593Smuzhiyun * unless there has been a tracing error, and the error log can be
7352*4882a593Smuzhiyun * cleared and have its memory freed by writing the empty string in
7353*4882a593Smuzhiyun * truncation mode to it, i.e. echo > tracing/error_log.
7354*4882a593Smuzhiyun *
7355*4882a593Smuzhiyun * NOTE: the @errs array along with the @type param are used to
7356*4882a593Smuzhiyun * produce a static error string - this string is not copied and saved
7357*4882a593Smuzhiyun * when the error is logged - only a pointer to it is saved. See
7358*4882a593Smuzhiyun * existing callers for examples of how static strings are typically
7359*4882a593Smuzhiyun * defined for use with tracing_log_err().
7360*4882a593Smuzhiyun */
7361*4882a593Smuzhiyun void tracing_log_err(struct trace_array *tr,
7362*4882a593Smuzhiyun const char *loc, const char *cmd,
7363*4882a593Smuzhiyun const char **errs, u8 type, u8 pos)
7364*4882a593Smuzhiyun {
7365*4882a593Smuzhiyun struct tracing_log_err *err;
7366*4882a593Smuzhiyun
7367*4882a593Smuzhiyun if (!tr)
7368*4882a593Smuzhiyun tr = &global_trace;
7369*4882a593Smuzhiyun
7370*4882a593Smuzhiyun mutex_lock(&tracing_err_log_lock);
7371*4882a593Smuzhiyun err = get_tracing_log_err(tr);
7372*4882a593Smuzhiyun if (PTR_ERR(err) == -ENOMEM) {
7373*4882a593Smuzhiyun mutex_unlock(&tracing_err_log_lock);
7374*4882a593Smuzhiyun return;
7375*4882a593Smuzhiyun }
7376*4882a593Smuzhiyun
7377*4882a593Smuzhiyun snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7378*4882a593Smuzhiyun snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd);
7379*4882a593Smuzhiyun
7380*4882a593Smuzhiyun err->info.errs = errs;
7381*4882a593Smuzhiyun err->info.type = type;
7382*4882a593Smuzhiyun err->info.pos = pos;
7383*4882a593Smuzhiyun err->info.ts = local_clock();
7384*4882a593Smuzhiyun
7385*4882a593Smuzhiyun list_add_tail(&err->list, &tr->err_log);
7386*4882a593Smuzhiyun mutex_unlock(&tracing_err_log_lock);
7387*4882a593Smuzhiyun }
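/*
 * Illustrative sketch of a tracing_log_err() caller (the names below are
 * hypothetical, not taken from this file): a command parser keeps its
 * static error strings in an array and points the caret at the bad token.
 *
 *	static const char *my_cmd_errs[] = {
 *		"Duplicate field name",
 *		"Missing argument",
 *	};
 *
 *	tracing_log_err(tr, "my_cmd", cmd, my_cmd_errs,
 *			1, err_pos(cmd, bad_token));
 */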
7388*4882a593Smuzhiyun
7389*4882a593Smuzhiyun static void clear_tracing_err_log(struct trace_array *tr)
7390*4882a593Smuzhiyun {
7391*4882a593Smuzhiyun struct tracing_log_err *err, *next;
7392*4882a593Smuzhiyun
7393*4882a593Smuzhiyun mutex_lock(&tracing_err_log_lock);
7394*4882a593Smuzhiyun list_for_each_entry_safe(err, next, &tr->err_log, list) {
7395*4882a593Smuzhiyun list_del(&err->list);
7396*4882a593Smuzhiyun kfree(err);
7397*4882a593Smuzhiyun }
7398*4882a593Smuzhiyun
7399*4882a593Smuzhiyun tr->n_err_log_entries = 0;
7400*4882a593Smuzhiyun mutex_unlock(&tracing_err_log_lock);
7401*4882a593Smuzhiyun }
7402*4882a593Smuzhiyun
7403*4882a593Smuzhiyun static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7404*4882a593Smuzhiyun {
7405*4882a593Smuzhiyun struct trace_array *tr = m->private;
7406*4882a593Smuzhiyun
7407*4882a593Smuzhiyun mutex_lock(&tracing_err_log_lock);
7408*4882a593Smuzhiyun
7409*4882a593Smuzhiyun return seq_list_start(&tr->err_log, *pos);
7410*4882a593Smuzhiyun }
7411*4882a593Smuzhiyun
7412*4882a593Smuzhiyun static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7413*4882a593Smuzhiyun {
7414*4882a593Smuzhiyun struct trace_array *tr = m->private;
7415*4882a593Smuzhiyun
7416*4882a593Smuzhiyun return seq_list_next(v, &tr->err_log, pos);
7417*4882a593Smuzhiyun }
7418*4882a593Smuzhiyun
7419*4882a593Smuzhiyun static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7420*4882a593Smuzhiyun {
7421*4882a593Smuzhiyun mutex_unlock(&tracing_err_log_lock);
7422*4882a593Smuzhiyun }
7423*4882a593Smuzhiyun
7424*4882a593Smuzhiyun static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7425*4882a593Smuzhiyun {
7426*4882a593Smuzhiyun u8 i;
7427*4882a593Smuzhiyun
7428*4882a593Smuzhiyun for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7429*4882a593Smuzhiyun seq_putc(m, ' ');
7430*4882a593Smuzhiyun for (i = 0; i < pos; i++)
7431*4882a593Smuzhiyun seq_putc(m, ' ');
7432*4882a593Smuzhiyun seq_puts(m, "^\n");
7433*4882a593Smuzhiyun }
7434*4882a593Smuzhiyun
7435*4882a593Smuzhiyun static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7436*4882a593Smuzhiyun {
7437*4882a593Smuzhiyun struct tracing_log_err *err = v;
7438*4882a593Smuzhiyun
7439*4882a593Smuzhiyun if (err) {
7440*4882a593Smuzhiyun const char *err_text = err->info.errs[err->info.type];
7441*4882a593Smuzhiyun u64 sec = err->info.ts;
7442*4882a593Smuzhiyun u32 nsec;
7443*4882a593Smuzhiyun
7444*4882a593Smuzhiyun nsec = do_div(sec, NSEC_PER_SEC);
7445*4882a593Smuzhiyun seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7446*4882a593Smuzhiyun err->loc, err_text);
7447*4882a593Smuzhiyun seq_printf(m, "%s", err->cmd);
7448*4882a593Smuzhiyun tracing_err_log_show_pos(m, err->info.pos);
7449*4882a593Smuzhiyun }
7450*4882a593Smuzhiyun
7451*4882a593Smuzhiyun return 0;
7452*4882a593Smuzhiyun }
7453*4882a593Smuzhiyun
7454*4882a593Smuzhiyun static const struct seq_operations tracing_err_log_seq_ops = {
7455*4882a593Smuzhiyun .start = tracing_err_log_seq_start,
7456*4882a593Smuzhiyun .next = tracing_err_log_seq_next,
7457*4882a593Smuzhiyun .stop = tracing_err_log_seq_stop,
7458*4882a593Smuzhiyun .show = tracing_err_log_seq_show
7459*4882a593Smuzhiyun };
7460*4882a593Smuzhiyun
7461*4882a593Smuzhiyun static int tracing_err_log_open(struct inode *inode, struct file *file)
7462*4882a593Smuzhiyun {
7463*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
7464*4882a593Smuzhiyun int ret = 0;
7465*4882a593Smuzhiyun
7466*4882a593Smuzhiyun ret = tracing_check_open_get_tr(tr);
7467*4882a593Smuzhiyun if (ret)
7468*4882a593Smuzhiyun return ret;
7469*4882a593Smuzhiyun
7470*4882a593Smuzhiyun /* If this file was opened for write, then erase contents */
7471*4882a593Smuzhiyun if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7472*4882a593Smuzhiyun clear_tracing_err_log(tr);
7473*4882a593Smuzhiyun
7474*4882a593Smuzhiyun if (file->f_mode & FMODE_READ) {
7475*4882a593Smuzhiyun ret = seq_open(file, &tracing_err_log_seq_ops);
7476*4882a593Smuzhiyun if (!ret) {
7477*4882a593Smuzhiyun struct seq_file *m = file->private_data;
7478*4882a593Smuzhiyun m->private = tr;
7479*4882a593Smuzhiyun } else {
7480*4882a593Smuzhiyun trace_array_put(tr);
7481*4882a593Smuzhiyun }
7482*4882a593Smuzhiyun }
7483*4882a593Smuzhiyun return ret;
7484*4882a593Smuzhiyun }
7485*4882a593Smuzhiyun
7486*4882a593Smuzhiyun static ssize_t tracing_err_log_write(struct file *file,
7487*4882a593Smuzhiyun const char __user *buffer,
7488*4882a593Smuzhiyun size_t count, loff_t *ppos)
7489*4882a593Smuzhiyun {
7490*4882a593Smuzhiyun return count;
7491*4882a593Smuzhiyun }
7492*4882a593Smuzhiyun
7493*4882a593Smuzhiyun static int tracing_err_log_release(struct inode *inode, struct file *file)
7494*4882a593Smuzhiyun {
7495*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
7496*4882a593Smuzhiyun
7497*4882a593Smuzhiyun trace_array_put(tr);
7498*4882a593Smuzhiyun
7499*4882a593Smuzhiyun if (file->f_mode & FMODE_READ)
7500*4882a593Smuzhiyun seq_release(inode, file);
7501*4882a593Smuzhiyun
7502*4882a593Smuzhiyun return 0;
7503*4882a593Smuzhiyun }
7504*4882a593Smuzhiyun
7505*4882a593Smuzhiyun static const struct file_operations tracing_err_log_fops = {
7506*4882a593Smuzhiyun .open = tracing_err_log_open,
7507*4882a593Smuzhiyun .write = tracing_err_log_write,
7508*4882a593Smuzhiyun .read = seq_read,
7509*4882a593Smuzhiyun .llseek = seq_lseek,
7510*4882a593Smuzhiyun .release = tracing_err_log_release,
7511*4882a593Smuzhiyun };
7512*4882a593Smuzhiyun
7513*4882a593Smuzhiyun static int tracing_buffers_open(struct inode *inode, struct file *filp)
7514*4882a593Smuzhiyun {
7515*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
7516*4882a593Smuzhiyun struct ftrace_buffer_info *info;
7517*4882a593Smuzhiyun int ret;
7518*4882a593Smuzhiyun
7519*4882a593Smuzhiyun ret = tracing_check_open_get_tr(tr);
7520*4882a593Smuzhiyun if (ret)
7521*4882a593Smuzhiyun return ret;
7522*4882a593Smuzhiyun
7523*4882a593Smuzhiyun info = kvzalloc(sizeof(*info), GFP_KERNEL);
7524*4882a593Smuzhiyun if (!info) {
7525*4882a593Smuzhiyun trace_array_put(tr);
7526*4882a593Smuzhiyun return -ENOMEM;
7527*4882a593Smuzhiyun }
7528*4882a593Smuzhiyun
7529*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
7530*4882a593Smuzhiyun
7531*4882a593Smuzhiyun info->iter.tr = tr;
7532*4882a593Smuzhiyun info->iter.cpu_file = tracing_get_cpu(inode);
7533*4882a593Smuzhiyun info->iter.trace = tr->current_trace;
7534*4882a593Smuzhiyun info->iter.array_buffer = &tr->array_buffer;
7535*4882a593Smuzhiyun info->spare = NULL;
7536*4882a593Smuzhiyun /* Force reading ring buffer for first read */
7537*4882a593Smuzhiyun info->read = (unsigned int)-1;
7538*4882a593Smuzhiyun
7539*4882a593Smuzhiyun filp->private_data = info;
7540*4882a593Smuzhiyun
7541*4882a593Smuzhiyun tr->trace_ref++;
7542*4882a593Smuzhiyun
7543*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
7544*4882a593Smuzhiyun
7545*4882a593Smuzhiyun ret = nonseekable_open(inode, filp);
7546*4882a593Smuzhiyun if (ret < 0)
7547*4882a593Smuzhiyun trace_array_put(tr);
7548*4882a593Smuzhiyun
7549*4882a593Smuzhiyun return ret;
7550*4882a593Smuzhiyun }
7551*4882a593Smuzhiyun
7552*4882a593Smuzhiyun static __poll_t
7553*4882a593Smuzhiyun tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7554*4882a593Smuzhiyun {
7555*4882a593Smuzhiyun struct ftrace_buffer_info *info = filp->private_data;
7556*4882a593Smuzhiyun struct trace_iterator *iter = &info->iter;
7557*4882a593Smuzhiyun
7558*4882a593Smuzhiyun return trace_poll(iter, filp, poll_table);
7559*4882a593Smuzhiyun }
7560*4882a593Smuzhiyun
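/*
 * Read path for trace_pipe_raw: a "spare" page is borrowed from the ring
 * buffer, filled by ring_buffer_read_page(), and then copied out to user
 * space in page-sized chunks.
 */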
7561*4882a593Smuzhiyun static ssize_t
7562*4882a593Smuzhiyun tracing_buffers_read(struct file *filp, char __user *ubuf,
7563*4882a593Smuzhiyun size_t count, loff_t *ppos)
7564*4882a593Smuzhiyun {
7565*4882a593Smuzhiyun struct ftrace_buffer_info *info = filp->private_data;
7566*4882a593Smuzhiyun struct trace_iterator *iter = &info->iter;
7567*4882a593Smuzhiyun ssize_t ret = 0;
7568*4882a593Smuzhiyun ssize_t size;
7569*4882a593Smuzhiyun
7570*4882a593Smuzhiyun if (!count)
7571*4882a593Smuzhiyun return 0;
7572*4882a593Smuzhiyun
7573*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
7574*4882a593Smuzhiyun if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7575*4882a593Smuzhiyun return -EBUSY;
7576*4882a593Smuzhiyun #endif
7577*4882a593Smuzhiyun
7578*4882a593Smuzhiyun if (!info->spare) {
7579*4882a593Smuzhiyun info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7580*4882a593Smuzhiyun iter->cpu_file);
7581*4882a593Smuzhiyun if (IS_ERR(info->spare)) {
7582*4882a593Smuzhiyun ret = PTR_ERR(info->spare);
7583*4882a593Smuzhiyun info->spare = NULL;
7584*4882a593Smuzhiyun } else {
7585*4882a593Smuzhiyun info->spare_cpu = iter->cpu_file;
7586*4882a593Smuzhiyun }
7587*4882a593Smuzhiyun }
7588*4882a593Smuzhiyun if (!info->spare)
7589*4882a593Smuzhiyun return ret;
7590*4882a593Smuzhiyun
7591*4882a593Smuzhiyun /* Do we have previous read data to read? */
7592*4882a593Smuzhiyun if (info->read < PAGE_SIZE)
7593*4882a593Smuzhiyun goto read;
7594*4882a593Smuzhiyun
7595*4882a593Smuzhiyun again:
7596*4882a593Smuzhiyun trace_access_lock(iter->cpu_file);
7597*4882a593Smuzhiyun ret = ring_buffer_read_page(iter->array_buffer->buffer,
7598*4882a593Smuzhiyun &info->spare,
7599*4882a593Smuzhiyun count,
7600*4882a593Smuzhiyun iter->cpu_file, 0);
7601*4882a593Smuzhiyun trace_access_unlock(iter->cpu_file);
7602*4882a593Smuzhiyun
7603*4882a593Smuzhiyun if (ret < 0) {
7604*4882a593Smuzhiyun if (trace_empty(iter)) {
7605*4882a593Smuzhiyun if ((filp->f_flags & O_NONBLOCK))
7606*4882a593Smuzhiyun return -EAGAIN;
7607*4882a593Smuzhiyun
7608*4882a593Smuzhiyun ret = wait_on_pipe(iter, 0);
7609*4882a593Smuzhiyun if (ret)
7610*4882a593Smuzhiyun return ret;
7611*4882a593Smuzhiyun
7612*4882a593Smuzhiyun goto again;
7613*4882a593Smuzhiyun }
7614*4882a593Smuzhiyun return 0;
7615*4882a593Smuzhiyun }
7616*4882a593Smuzhiyun
7617*4882a593Smuzhiyun info->read = 0;
7618*4882a593Smuzhiyun read:
7619*4882a593Smuzhiyun size = PAGE_SIZE - info->read;
7620*4882a593Smuzhiyun if (size > count)
7621*4882a593Smuzhiyun size = count;
7622*4882a593Smuzhiyun
7623*4882a593Smuzhiyun ret = copy_to_user(ubuf, info->spare + info->read, size);
7624*4882a593Smuzhiyun if (ret == size)
7625*4882a593Smuzhiyun return -EFAULT;
7626*4882a593Smuzhiyun
7627*4882a593Smuzhiyun size -= ret;
7628*4882a593Smuzhiyun
7629*4882a593Smuzhiyun *ppos += size;
7630*4882a593Smuzhiyun info->read += size;
7631*4882a593Smuzhiyun
7632*4882a593Smuzhiyun return size;
7633*4882a593Smuzhiyun }
7634*4882a593Smuzhiyun
7635*4882a593Smuzhiyun static int tracing_buffers_release(struct inode *inode, struct file *file)
7636*4882a593Smuzhiyun {
7637*4882a593Smuzhiyun struct ftrace_buffer_info *info = file->private_data;
7638*4882a593Smuzhiyun struct trace_iterator *iter = &info->iter;
7639*4882a593Smuzhiyun
7640*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
7641*4882a593Smuzhiyun
7642*4882a593Smuzhiyun iter->tr->trace_ref--;
7643*4882a593Smuzhiyun
7644*4882a593Smuzhiyun __trace_array_put(iter->tr);
7645*4882a593Smuzhiyun
7646*4882a593Smuzhiyun if (info->spare)
7647*4882a593Smuzhiyun ring_buffer_free_read_page(iter->array_buffer->buffer,
7648*4882a593Smuzhiyun info->spare_cpu, info->spare);
7649*4882a593Smuzhiyun kvfree(info);
7650*4882a593Smuzhiyun
7651*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
7652*4882a593Smuzhiyun
7653*4882a593Smuzhiyun return 0;
7654*4882a593Smuzhiyun }
7655*4882a593Smuzhiyun
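/*
 * For splice reads, ring buffer pages are handed to the pipe without
 * copying. Each page is wrapped in a refcounted buffer_ref and is only
 * returned to the ring buffer once the last reference is dropped.
 */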
7656*4882a593Smuzhiyun struct buffer_ref {
7657*4882a593Smuzhiyun struct trace_buffer *buffer;
7658*4882a593Smuzhiyun void *page;
7659*4882a593Smuzhiyun int cpu;
7660*4882a593Smuzhiyun refcount_t refcount;
7661*4882a593Smuzhiyun };
7662*4882a593Smuzhiyun
7663*4882a593Smuzhiyun static void buffer_ref_release(struct buffer_ref *ref)
7664*4882a593Smuzhiyun {
7665*4882a593Smuzhiyun if (!refcount_dec_and_test(&ref->refcount))
7666*4882a593Smuzhiyun return;
7667*4882a593Smuzhiyun ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7668*4882a593Smuzhiyun kfree(ref);
7669*4882a593Smuzhiyun }
7670*4882a593Smuzhiyun
7671*4882a593Smuzhiyun static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7672*4882a593Smuzhiyun struct pipe_buffer *buf)
7673*4882a593Smuzhiyun {
7674*4882a593Smuzhiyun struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7675*4882a593Smuzhiyun
7676*4882a593Smuzhiyun buffer_ref_release(ref);
7677*4882a593Smuzhiyun buf->private = 0;
7678*4882a593Smuzhiyun }
7679*4882a593Smuzhiyun
7680*4882a593Smuzhiyun static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7681*4882a593Smuzhiyun struct pipe_buffer *buf)
7682*4882a593Smuzhiyun {
7683*4882a593Smuzhiyun struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7684*4882a593Smuzhiyun
7685*4882a593Smuzhiyun if (refcount_read(&ref->refcount) > INT_MAX/2)
7686*4882a593Smuzhiyun return false;
7687*4882a593Smuzhiyun
7688*4882a593Smuzhiyun refcount_inc(&ref->refcount);
7689*4882a593Smuzhiyun return true;
7690*4882a593Smuzhiyun }
7691*4882a593Smuzhiyun
7692*4882a593Smuzhiyun /* Pipe buffer operations for a buffer. */
7693*4882a593Smuzhiyun static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7694*4882a593Smuzhiyun .release = buffer_pipe_buf_release,
7695*4882a593Smuzhiyun .get = buffer_pipe_buf_get,
7696*4882a593Smuzhiyun };
7697*4882a593Smuzhiyun
7698*4882a593Smuzhiyun /*
7699*4882a593Smuzhiyun * Callback from splice_to_pipe(), if we need to release some pages
7700*4882a593Smuzhiyun * at the end of the spd in case we errored out in filling the pipe.
7701*4882a593Smuzhiyun */
7702*4882a593Smuzhiyun static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7703*4882a593Smuzhiyun {
7704*4882a593Smuzhiyun struct buffer_ref *ref =
7705*4882a593Smuzhiyun (struct buffer_ref *)spd->partial[i].private;
7706*4882a593Smuzhiyun
7707*4882a593Smuzhiyun buffer_ref_release(ref);
7708*4882a593Smuzhiyun spd->partial[i].private = 0;
7709*4882a593Smuzhiyun }
7710*4882a593Smuzhiyun
7711*4882a593Smuzhiyun static ssize_t
7712*4882a593Smuzhiyun tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7713*4882a593Smuzhiyun struct pipe_inode_info *pipe, size_t len,
7714*4882a593Smuzhiyun unsigned int flags)
7715*4882a593Smuzhiyun {
7716*4882a593Smuzhiyun struct ftrace_buffer_info *info = file->private_data;
7717*4882a593Smuzhiyun struct trace_iterator *iter = &info->iter;
7718*4882a593Smuzhiyun struct partial_page partial_def[PIPE_DEF_BUFFERS];
7719*4882a593Smuzhiyun struct page *pages_def[PIPE_DEF_BUFFERS];
7720*4882a593Smuzhiyun struct splice_pipe_desc spd = {
7721*4882a593Smuzhiyun .pages = pages_def,
7722*4882a593Smuzhiyun .partial = partial_def,
7723*4882a593Smuzhiyun .nr_pages_max = PIPE_DEF_BUFFERS,
7724*4882a593Smuzhiyun .ops = &buffer_pipe_buf_ops,
7725*4882a593Smuzhiyun .spd_release = buffer_spd_release,
7726*4882a593Smuzhiyun };
7727*4882a593Smuzhiyun struct buffer_ref *ref;
7728*4882a593Smuzhiyun int entries, i;
7729*4882a593Smuzhiyun ssize_t ret = 0;
7730*4882a593Smuzhiyun
7731*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
7732*4882a593Smuzhiyun if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7733*4882a593Smuzhiyun return -EBUSY;
7734*4882a593Smuzhiyun #endif
7735*4882a593Smuzhiyun
7736*4882a593Smuzhiyun if (*ppos & (PAGE_SIZE - 1))
7737*4882a593Smuzhiyun return -EINVAL;
7738*4882a593Smuzhiyun
7739*4882a593Smuzhiyun if (len & (PAGE_SIZE - 1)) {
7740*4882a593Smuzhiyun if (len < PAGE_SIZE)
7741*4882a593Smuzhiyun return -EINVAL;
7742*4882a593Smuzhiyun len &= PAGE_MASK;
7743*4882a593Smuzhiyun }
7744*4882a593Smuzhiyun
7745*4882a593Smuzhiyun if (splice_grow_spd(pipe, &spd))
7746*4882a593Smuzhiyun return -ENOMEM;
7747*4882a593Smuzhiyun
7748*4882a593Smuzhiyun again:
7749*4882a593Smuzhiyun trace_access_lock(iter->cpu_file);
7750*4882a593Smuzhiyun entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7751*4882a593Smuzhiyun
7752*4882a593Smuzhiyun for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7753*4882a593Smuzhiyun struct page *page;
7754*4882a593Smuzhiyun int r;
7755*4882a593Smuzhiyun
7756*4882a593Smuzhiyun ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7757*4882a593Smuzhiyun if (!ref) {
7758*4882a593Smuzhiyun ret = -ENOMEM;
7759*4882a593Smuzhiyun break;
7760*4882a593Smuzhiyun }
7761*4882a593Smuzhiyun
7762*4882a593Smuzhiyun refcount_set(&ref->refcount, 1);
7763*4882a593Smuzhiyun ref->buffer = iter->array_buffer->buffer;
7764*4882a593Smuzhiyun ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7765*4882a593Smuzhiyun if (IS_ERR(ref->page)) {
7766*4882a593Smuzhiyun ret = PTR_ERR(ref->page);
7767*4882a593Smuzhiyun ref->page = NULL;
7768*4882a593Smuzhiyun kfree(ref);
7769*4882a593Smuzhiyun break;
7770*4882a593Smuzhiyun }
7771*4882a593Smuzhiyun ref->cpu = iter->cpu_file;
7772*4882a593Smuzhiyun
7773*4882a593Smuzhiyun r = ring_buffer_read_page(ref->buffer, &ref->page,
7774*4882a593Smuzhiyun len, iter->cpu_file, 1);
7775*4882a593Smuzhiyun if (r < 0) {
7776*4882a593Smuzhiyun ring_buffer_free_read_page(ref->buffer, ref->cpu,
7777*4882a593Smuzhiyun ref->page);
7778*4882a593Smuzhiyun kfree(ref);
7779*4882a593Smuzhiyun break;
7780*4882a593Smuzhiyun }
7781*4882a593Smuzhiyun
7782*4882a593Smuzhiyun page = virt_to_page(ref->page);
7783*4882a593Smuzhiyun
7784*4882a593Smuzhiyun spd.pages[i] = page;
7785*4882a593Smuzhiyun spd.partial[i].len = PAGE_SIZE;
7786*4882a593Smuzhiyun spd.partial[i].offset = 0;
7787*4882a593Smuzhiyun spd.partial[i].private = (unsigned long)ref;
7788*4882a593Smuzhiyun spd.nr_pages++;
7789*4882a593Smuzhiyun *ppos += PAGE_SIZE;
7790*4882a593Smuzhiyun
7791*4882a593Smuzhiyun entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7792*4882a593Smuzhiyun }
7793*4882a593Smuzhiyun
7794*4882a593Smuzhiyun trace_access_unlock(iter->cpu_file);
7795*4882a593Smuzhiyun spd.nr_pages = i;
7796*4882a593Smuzhiyun
7797*4882a593Smuzhiyun /* did we read anything? */
7798*4882a593Smuzhiyun if (!spd.nr_pages) {
7799*4882a593Smuzhiyun if (ret)
7800*4882a593Smuzhiyun goto out;
7801*4882a593Smuzhiyun
7802*4882a593Smuzhiyun ret = -EAGAIN;
7803*4882a593Smuzhiyun if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7804*4882a593Smuzhiyun goto out;
7805*4882a593Smuzhiyun
7806*4882a593Smuzhiyun ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7807*4882a593Smuzhiyun if (ret)
7808*4882a593Smuzhiyun goto out;
7809*4882a593Smuzhiyun
7810*4882a593Smuzhiyun goto again;
7811*4882a593Smuzhiyun }
7812*4882a593Smuzhiyun
7813*4882a593Smuzhiyun ret = splice_to_pipe(pipe, &spd);
7814*4882a593Smuzhiyun out:
7815*4882a593Smuzhiyun splice_shrink_spd(&spd);
7816*4882a593Smuzhiyun
7817*4882a593Smuzhiyun return ret;
7818*4882a593Smuzhiyun }
7819*4882a593Smuzhiyun
7820*4882a593Smuzhiyun static const struct file_operations tracing_buffers_fops = {
7821*4882a593Smuzhiyun .open = tracing_buffers_open,
7822*4882a593Smuzhiyun .read = tracing_buffers_read,
7823*4882a593Smuzhiyun .poll = tracing_buffers_poll,
7824*4882a593Smuzhiyun .release = tracing_buffers_release,
7825*4882a593Smuzhiyun .splice_read = tracing_buffers_splice_read,
7826*4882a593Smuzhiyun .llseek = no_llseek,
7827*4882a593Smuzhiyun };
7828*4882a593Smuzhiyun
7829*4882a593Smuzhiyun static ssize_t
7830*4882a593Smuzhiyun tracing_stats_read(struct file *filp, char __user *ubuf,
7831*4882a593Smuzhiyun size_t count, loff_t *ppos)
7832*4882a593Smuzhiyun {
7833*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
7834*4882a593Smuzhiyun struct trace_array *tr = inode->i_private;
7835*4882a593Smuzhiyun struct array_buffer *trace_buf = &tr->array_buffer;
7836*4882a593Smuzhiyun int cpu = tracing_get_cpu(inode);
7837*4882a593Smuzhiyun struct trace_seq *s;
7838*4882a593Smuzhiyun unsigned long cnt;
7839*4882a593Smuzhiyun unsigned long long t;
7840*4882a593Smuzhiyun unsigned long usec_rem;
7841*4882a593Smuzhiyun
7842*4882a593Smuzhiyun s = kmalloc(sizeof(*s), GFP_KERNEL);
7843*4882a593Smuzhiyun if (!s)
7844*4882a593Smuzhiyun return -ENOMEM;
7845*4882a593Smuzhiyun
7846*4882a593Smuzhiyun trace_seq_init(s);
7847*4882a593Smuzhiyun
7848*4882a593Smuzhiyun cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7849*4882a593Smuzhiyun trace_seq_printf(s, "entries: %ld\n", cnt);
7850*4882a593Smuzhiyun
7851*4882a593Smuzhiyun cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7852*4882a593Smuzhiyun trace_seq_printf(s, "overrun: %ld\n", cnt);
7853*4882a593Smuzhiyun
7854*4882a593Smuzhiyun cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7855*4882a593Smuzhiyun trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7856*4882a593Smuzhiyun
7857*4882a593Smuzhiyun cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7858*4882a593Smuzhiyun trace_seq_printf(s, "bytes: %ld\n", cnt);
7859*4882a593Smuzhiyun
7860*4882a593Smuzhiyun if (trace_clocks[tr->clock_id].in_ns) {
7861*4882a593Smuzhiyun /* local or global for trace_clock */
7862*4882a593Smuzhiyun t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7863*4882a593Smuzhiyun usec_rem = do_div(t, USEC_PER_SEC);
7864*4882a593Smuzhiyun trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7865*4882a593Smuzhiyun t, usec_rem);
7866*4882a593Smuzhiyun
7867*4882a593Smuzhiyun t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7868*4882a593Smuzhiyun usec_rem = do_div(t, USEC_PER_SEC);
7869*4882a593Smuzhiyun trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7870*4882a593Smuzhiyun } else {
7871*4882a593Smuzhiyun /* counter or tsc mode for trace_clock */
7872*4882a593Smuzhiyun trace_seq_printf(s, "oldest event ts: %llu\n",
7873*4882a593Smuzhiyun ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7874*4882a593Smuzhiyun
7875*4882a593Smuzhiyun trace_seq_printf(s, "now ts: %llu\n",
7876*4882a593Smuzhiyun ring_buffer_time_stamp(trace_buf->buffer, cpu));
7877*4882a593Smuzhiyun }
7878*4882a593Smuzhiyun
7879*4882a593Smuzhiyun cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7880*4882a593Smuzhiyun trace_seq_printf(s, "dropped events: %ld\n", cnt);
7881*4882a593Smuzhiyun
7882*4882a593Smuzhiyun cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7883*4882a593Smuzhiyun trace_seq_printf(s, "read events: %ld\n", cnt);
7884*4882a593Smuzhiyun
7885*4882a593Smuzhiyun count = simple_read_from_buffer(ubuf, count, ppos,
7886*4882a593Smuzhiyun s->buffer, trace_seq_used(s));
7887*4882a593Smuzhiyun
7888*4882a593Smuzhiyun kfree(s);
7889*4882a593Smuzhiyun
7890*4882a593Smuzhiyun return count;
7891*4882a593Smuzhiyun }
7892*4882a593Smuzhiyun
7893*4882a593Smuzhiyun static const struct file_operations tracing_stats_fops = {
7894*4882a593Smuzhiyun .open = tracing_open_generic_tr,
7895*4882a593Smuzhiyun .read = tracing_stats_read,
7896*4882a593Smuzhiyun .llseek = generic_file_llseek,
7897*4882a593Smuzhiyun .release = tracing_release_generic_tr,
7898*4882a593Smuzhiyun };
7899*4882a593Smuzhiyun
7900*4882a593Smuzhiyun #ifdef CONFIG_DYNAMIC_FTRACE
7901*4882a593Smuzhiyun
7902*4882a593Smuzhiyun static ssize_t
7903*4882a593Smuzhiyun tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7904*4882a593Smuzhiyun size_t cnt, loff_t *ppos)
7905*4882a593Smuzhiyun {
7906*4882a593Smuzhiyun ssize_t ret;
7907*4882a593Smuzhiyun char *buf;
7908*4882a593Smuzhiyun int r;
7909*4882a593Smuzhiyun
7910*4882a593Smuzhiyun /* 256 should be plenty to hold the amount needed */
7911*4882a593Smuzhiyun buf = kmalloc(256, GFP_KERNEL);
7912*4882a593Smuzhiyun if (!buf)
7913*4882a593Smuzhiyun return -ENOMEM;
7914*4882a593Smuzhiyun
7915*4882a593Smuzhiyun r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7916*4882a593Smuzhiyun ftrace_update_tot_cnt,
7917*4882a593Smuzhiyun ftrace_number_of_pages,
7918*4882a593Smuzhiyun ftrace_number_of_groups);
7919*4882a593Smuzhiyun
7920*4882a593Smuzhiyun ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7921*4882a593Smuzhiyun kfree(buf);
7922*4882a593Smuzhiyun return ret;
7923*4882a593Smuzhiyun }
7924*4882a593Smuzhiyun
7925*4882a593Smuzhiyun static const struct file_operations tracing_dyn_info_fops = {
7926*4882a593Smuzhiyun .open = tracing_open_generic,
7927*4882a593Smuzhiyun .read = tracing_read_dyn_info,
7928*4882a593Smuzhiyun .llseek = generic_file_llseek,
7929*4882a593Smuzhiyun };
7930*4882a593Smuzhiyun #endif /* CONFIG_DYNAMIC_FTRACE */
7931*4882a593Smuzhiyun
7932*4882a593Smuzhiyun #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7933*4882a593Smuzhiyun static void
7934*4882a593Smuzhiyun ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7935*4882a593Smuzhiyun struct trace_array *tr, struct ftrace_probe_ops *ops,
7936*4882a593Smuzhiyun void *data)
7937*4882a593Smuzhiyun {
7938*4882a593Smuzhiyun tracing_snapshot_instance(tr);
7939*4882a593Smuzhiyun }
7940*4882a593Smuzhiyun
7941*4882a593Smuzhiyun static void
7942*4882a593Smuzhiyun ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7943*4882a593Smuzhiyun struct trace_array *tr, struct ftrace_probe_ops *ops,
7944*4882a593Smuzhiyun void *data)
7945*4882a593Smuzhiyun {
7946*4882a593Smuzhiyun struct ftrace_func_mapper *mapper = data;
7947*4882a593Smuzhiyun long *count = NULL;
7948*4882a593Smuzhiyun
7949*4882a593Smuzhiyun if (mapper)
7950*4882a593Smuzhiyun count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7951*4882a593Smuzhiyun
7952*4882a593Smuzhiyun if (count) {
7953*4882a593Smuzhiyun
7954*4882a593Smuzhiyun if (*count <= 0)
7955*4882a593Smuzhiyun return;
7956*4882a593Smuzhiyun
7957*4882a593Smuzhiyun (*count)--;
7958*4882a593Smuzhiyun }
7959*4882a593Smuzhiyun
7960*4882a593Smuzhiyun tracing_snapshot_instance(tr);
7961*4882a593Smuzhiyun }
7962*4882a593Smuzhiyun
7963*4882a593Smuzhiyun static int
7964*4882a593Smuzhiyun ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7965*4882a593Smuzhiyun struct ftrace_probe_ops *ops, void *data)
7966*4882a593Smuzhiyun {
7967*4882a593Smuzhiyun struct ftrace_func_mapper *mapper = data;
7968*4882a593Smuzhiyun long *count = NULL;
7969*4882a593Smuzhiyun
7970*4882a593Smuzhiyun seq_printf(m, "%ps:", (void *)ip);
7971*4882a593Smuzhiyun
7972*4882a593Smuzhiyun seq_puts(m, "snapshot");
7973*4882a593Smuzhiyun
7974*4882a593Smuzhiyun if (mapper)
7975*4882a593Smuzhiyun count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7976*4882a593Smuzhiyun
7977*4882a593Smuzhiyun if (count)
7978*4882a593Smuzhiyun seq_printf(m, ":count=%ld\n", *count);
7979*4882a593Smuzhiyun else
7980*4882a593Smuzhiyun seq_puts(m, ":unlimited\n");
7981*4882a593Smuzhiyun
7982*4882a593Smuzhiyun return 0;
7983*4882a593Smuzhiyun }
7984*4882a593Smuzhiyun
7985*4882a593Smuzhiyun static int
7986*4882a593Smuzhiyun ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7987*4882a593Smuzhiyun unsigned long ip, void *init_data, void **data)
7988*4882a593Smuzhiyun {
7989*4882a593Smuzhiyun struct ftrace_func_mapper *mapper = *data;
7990*4882a593Smuzhiyun
7991*4882a593Smuzhiyun if (!mapper) {
7992*4882a593Smuzhiyun mapper = allocate_ftrace_func_mapper();
7993*4882a593Smuzhiyun if (!mapper)
7994*4882a593Smuzhiyun return -ENOMEM;
7995*4882a593Smuzhiyun *data = mapper;
7996*4882a593Smuzhiyun }
7997*4882a593Smuzhiyun
7998*4882a593Smuzhiyun return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7999*4882a593Smuzhiyun }
8000*4882a593Smuzhiyun
8001*4882a593Smuzhiyun static void
8002*4882a593Smuzhiyun ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8003*4882a593Smuzhiyun unsigned long ip, void *data)
8004*4882a593Smuzhiyun {
8005*4882a593Smuzhiyun struct ftrace_func_mapper *mapper = data;
8006*4882a593Smuzhiyun
8007*4882a593Smuzhiyun if (!ip) {
8008*4882a593Smuzhiyun if (!mapper)
8009*4882a593Smuzhiyun return;
8010*4882a593Smuzhiyun free_ftrace_func_mapper(mapper, NULL);
8011*4882a593Smuzhiyun return;
8012*4882a593Smuzhiyun }
8013*4882a593Smuzhiyun
8014*4882a593Smuzhiyun ftrace_func_mapper_remove_ip(mapper, ip);
8015*4882a593Smuzhiyun }
8016*4882a593Smuzhiyun
8017*4882a593Smuzhiyun static struct ftrace_probe_ops snapshot_probe_ops = {
8018*4882a593Smuzhiyun .func = ftrace_snapshot,
8019*4882a593Smuzhiyun .print = ftrace_snapshot_print,
8020*4882a593Smuzhiyun };
8021*4882a593Smuzhiyun
8022*4882a593Smuzhiyun static struct ftrace_probe_ops snapshot_count_probe_ops = {
8023*4882a593Smuzhiyun .func = ftrace_count_snapshot,
8024*4882a593Smuzhiyun .print = ftrace_snapshot_print,
8025*4882a593Smuzhiyun .init = ftrace_snapshot_init,
8026*4882a593Smuzhiyun .free = ftrace_snapshot_free,
8027*4882a593Smuzhiyun };
8028*4882a593Smuzhiyun
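/*
 * Handler for the "snapshot" function command written to
 * set_ftrace_filter. Illustrative usage (the function name is only an
 * example):
 *
 *   # echo 'schedule:snapshot' > set_ftrace_filter
 *   # echo 'schedule:snapshot:5' > set_ftrace_filter
 *
 * An optional count limits how many snapshots will be taken.
 */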
8029*4882a593Smuzhiyun static int
8030*4882a593Smuzhiyun ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8031*4882a593Smuzhiyun char *glob, char *cmd, char *param, int enable)
8032*4882a593Smuzhiyun {
8033*4882a593Smuzhiyun struct ftrace_probe_ops *ops;
8034*4882a593Smuzhiyun void *count = (void *)-1;
8035*4882a593Smuzhiyun char *number;
8036*4882a593Smuzhiyun int ret;
8037*4882a593Smuzhiyun
8038*4882a593Smuzhiyun if (!tr)
8039*4882a593Smuzhiyun return -ENODEV;
8040*4882a593Smuzhiyun
8041*4882a593Smuzhiyun /* hash funcs only work with set_ftrace_filter */
8042*4882a593Smuzhiyun if (!enable)
8043*4882a593Smuzhiyun return -EINVAL;
8044*4882a593Smuzhiyun
8045*4882a593Smuzhiyun ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8046*4882a593Smuzhiyun
8047*4882a593Smuzhiyun if (glob[0] == '!')
8048*4882a593Smuzhiyun return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8049*4882a593Smuzhiyun
8050*4882a593Smuzhiyun if (!param)
8051*4882a593Smuzhiyun goto out_reg;
8052*4882a593Smuzhiyun
8053*4882a593Smuzhiyun 	number = strsep(&param, ":");
8054*4882a593Smuzhiyun
8055*4882a593Smuzhiyun if (!strlen(number))
8056*4882a593Smuzhiyun goto out_reg;
8057*4882a593Smuzhiyun
8058*4882a593Smuzhiyun /*
8059*4882a593Smuzhiyun * We use the callback data field (which is a pointer)
8060*4882a593Smuzhiyun * as our counter.
8061*4882a593Smuzhiyun */
8062*4882a593Smuzhiyun ret = kstrtoul(number, 0, (unsigned long *)&count);
8063*4882a593Smuzhiyun if (ret)
8064*4882a593Smuzhiyun return ret;
8065*4882a593Smuzhiyun
8066*4882a593Smuzhiyun out_reg:
8067*4882a593Smuzhiyun ret = tracing_alloc_snapshot_instance(tr);
8068*4882a593Smuzhiyun if (ret < 0)
8069*4882a593Smuzhiyun goto out;
8070*4882a593Smuzhiyun
8071*4882a593Smuzhiyun ret = register_ftrace_function_probe(glob, tr, ops, count);
8072*4882a593Smuzhiyun
8073*4882a593Smuzhiyun out:
8074*4882a593Smuzhiyun return ret < 0 ? ret : 0;
8075*4882a593Smuzhiyun }
8076*4882a593Smuzhiyun
8077*4882a593Smuzhiyun static struct ftrace_func_command ftrace_snapshot_cmd = {
8078*4882a593Smuzhiyun .name = "snapshot",
8079*4882a593Smuzhiyun .func = ftrace_trace_snapshot_callback,
8080*4882a593Smuzhiyun };
8081*4882a593Smuzhiyun
8082*4882a593Smuzhiyun static __init int register_snapshot_cmd(void)
8083*4882a593Smuzhiyun {
8084*4882a593Smuzhiyun return register_ftrace_command(&ftrace_snapshot_cmd);
8085*4882a593Smuzhiyun }
8086*4882a593Smuzhiyun #else
8087*4882a593Smuzhiyun static inline __init int register_snapshot_cmd(void) { return 0; }
8088*4882a593Smuzhiyun #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8089*4882a593Smuzhiyun
8090*4882a593Smuzhiyun static struct dentry *tracing_get_dentry(struct trace_array *tr)
8091*4882a593Smuzhiyun {
8092*4882a593Smuzhiyun if (WARN_ON(!tr->dir))
8093*4882a593Smuzhiyun return ERR_PTR(-ENODEV);
8094*4882a593Smuzhiyun
8095*4882a593Smuzhiyun /* Top directory uses NULL as the parent */
8096*4882a593Smuzhiyun if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8097*4882a593Smuzhiyun return NULL;
8098*4882a593Smuzhiyun
8099*4882a593Smuzhiyun /* All sub buffers have a descriptor */
8100*4882a593Smuzhiyun return tr->dir;
8101*4882a593Smuzhiyun }
8102*4882a593Smuzhiyun
8103*4882a593Smuzhiyun static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8104*4882a593Smuzhiyun {
8105*4882a593Smuzhiyun struct dentry *d_tracer;
8106*4882a593Smuzhiyun
8107*4882a593Smuzhiyun if (tr->percpu_dir)
8108*4882a593Smuzhiyun return tr->percpu_dir;
8109*4882a593Smuzhiyun
8110*4882a593Smuzhiyun d_tracer = tracing_get_dentry(tr);
8111*4882a593Smuzhiyun if (IS_ERR(d_tracer))
8112*4882a593Smuzhiyun return NULL;
8113*4882a593Smuzhiyun
8114*4882a593Smuzhiyun tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8115*4882a593Smuzhiyun
8116*4882a593Smuzhiyun MEM_FAIL(!tr->percpu_dir,
8117*4882a593Smuzhiyun "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8118*4882a593Smuzhiyun
8119*4882a593Smuzhiyun return tr->percpu_dir;
8120*4882a593Smuzhiyun }
8121*4882a593Smuzhiyun
8122*4882a593Smuzhiyun static struct dentry *
8123*4882a593Smuzhiyun trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8124*4882a593Smuzhiyun void *data, long cpu, const struct file_operations *fops)
8125*4882a593Smuzhiyun {
8126*4882a593Smuzhiyun struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8127*4882a593Smuzhiyun
8128*4882a593Smuzhiyun if (ret) /* See tracing_get_cpu() */
8129*4882a593Smuzhiyun d_inode(ret)->i_cdev = (void *)(cpu + 1);
8130*4882a593Smuzhiyun return ret;
8131*4882a593Smuzhiyun }
8132*4882a593Smuzhiyun
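/*
 * Populate per_cpu/cpu<N>/ with the per-CPU variants of trace,
 * trace_pipe, trace_pipe_raw, stats and buffer_size_kb, plus snapshot
 * and snapshot_raw when CONFIG_TRACER_SNAPSHOT is enabled.
 */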
8133*4882a593Smuzhiyun static void
8134*4882a593Smuzhiyun tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8135*4882a593Smuzhiyun {
8136*4882a593Smuzhiyun struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8137*4882a593Smuzhiyun struct dentry *d_cpu;
8138*4882a593Smuzhiyun char cpu_dir[30]; /* 30 characters should be more than enough */
8139*4882a593Smuzhiyun
8140*4882a593Smuzhiyun if (!d_percpu)
8141*4882a593Smuzhiyun return;
8142*4882a593Smuzhiyun
8143*4882a593Smuzhiyun snprintf(cpu_dir, 30, "cpu%ld", cpu);
8144*4882a593Smuzhiyun d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8145*4882a593Smuzhiyun if (!d_cpu) {
8146*4882a593Smuzhiyun pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8147*4882a593Smuzhiyun return;
8148*4882a593Smuzhiyun }
8149*4882a593Smuzhiyun
8150*4882a593Smuzhiyun /* per cpu trace_pipe */
8151*4882a593Smuzhiyun trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8152*4882a593Smuzhiyun tr, cpu, &tracing_pipe_fops);
8153*4882a593Smuzhiyun
8154*4882a593Smuzhiyun /* per cpu trace */
8155*4882a593Smuzhiyun trace_create_cpu_file("trace", 0644, d_cpu,
8156*4882a593Smuzhiyun tr, cpu, &tracing_fops);
8157*4882a593Smuzhiyun
8158*4882a593Smuzhiyun trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8159*4882a593Smuzhiyun tr, cpu, &tracing_buffers_fops);
8160*4882a593Smuzhiyun
8161*4882a593Smuzhiyun trace_create_cpu_file("stats", 0444, d_cpu,
8162*4882a593Smuzhiyun tr, cpu, &tracing_stats_fops);
8163*4882a593Smuzhiyun
8164*4882a593Smuzhiyun trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8165*4882a593Smuzhiyun tr, cpu, &tracing_entries_fops);
8166*4882a593Smuzhiyun
8167*4882a593Smuzhiyun #ifdef CONFIG_TRACER_SNAPSHOT
8168*4882a593Smuzhiyun trace_create_cpu_file("snapshot", 0644, d_cpu,
8169*4882a593Smuzhiyun tr, cpu, &snapshot_fops);
8170*4882a593Smuzhiyun
8171*4882a593Smuzhiyun trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8172*4882a593Smuzhiyun tr, cpu, &snapshot_raw_fops);
8173*4882a593Smuzhiyun #endif
8174*4882a593Smuzhiyun }
8175*4882a593Smuzhiyun
8176*4882a593Smuzhiyun #ifdef CONFIG_FTRACE_SELFTEST
8177*4882a593Smuzhiyun /* Let selftest have access to static functions in this file */
8178*4882a593Smuzhiyun #include "trace_selftest.c"
8179*4882a593Smuzhiyun #endif
8180*4882a593Smuzhiyun
8181*4882a593Smuzhiyun static ssize_t
8182*4882a593Smuzhiyun trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8183*4882a593Smuzhiyun loff_t *ppos)
8184*4882a593Smuzhiyun {
8185*4882a593Smuzhiyun struct trace_option_dentry *topt = filp->private_data;
8186*4882a593Smuzhiyun char *buf;
8187*4882a593Smuzhiyun
8188*4882a593Smuzhiyun if (topt->flags->val & topt->opt->bit)
8189*4882a593Smuzhiyun buf = "1\n";
8190*4882a593Smuzhiyun else
8191*4882a593Smuzhiyun buf = "0\n";
8192*4882a593Smuzhiyun
8193*4882a593Smuzhiyun return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8194*4882a593Smuzhiyun }
8195*4882a593Smuzhiyun
8196*4882a593Smuzhiyun static ssize_t
8197*4882a593Smuzhiyun trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8198*4882a593Smuzhiyun loff_t *ppos)
8199*4882a593Smuzhiyun {
8200*4882a593Smuzhiyun struct trace_option_dentry *topt = filp->private_data;
8201*4882a593Smuzhiyun unsigned long val;
8202*4882a593Smuzhiyun int ret;
8203*4882a593Smuzhiyun
8204*4882a593Smuzhiyun ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8205*4882a593Smuzhiyun if (ret)
8206*4882a593Smuzhiyun return ret;
8207*4882a593Smuzhiyun
8208*4882a593Smuzhiyun if (val != 0 && val != 1)
8209*4882a593Smuzhiyun return -EINVAL;
8210*4882a593Smuzhiyun
8211*4882a593Smuzhiyun if (!!(topt->flags->val & topt->opt->bit) != val) {
8212*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
8213*4882a593Smuzhiyun ret = __set_tracer_option(topt->tr, topt->flags,
8214*4882a593Smuzhiyun topt->opt, !val);
8215*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
8216*4882a593Smuzhiyun if (ret)
8217*4882a593Smuzhiyun return ret;
8218*4882a593Smuzhiyun }
8219*4882a593Smuzhiyun
8220*4882a593Smuzhiyun *ppos += cnt;
8221*4882a593Smuzhiyun
8222*4882a593Smuzhiyun return cnt;
8223*4882a593Smuzhiyun }
8224*4882a593Smuzhiyun
8225*4882a593Smuzhiyun
8226*4882a593Smuzhiyun static const struct file_operations trace_options_fops = {
8227*4882a593Smuzhiyun .open = tracing_open_generic,
8228*4882a593Smuzhiyun .read = trace_options_read,
8229*4882a593Smuzhiyun .write = trace_options_write,
8230*4882a593Smuzhiyun .llseek = generic_file_llseek,
8231*4882a593Smuzhiyun };
8232*4882a593Smuzhiyun
8233*4882a593Smuzhiyun /*
8234*4882a593Smuzhiyun * In order to pass in both the trace_array descriptor as well as the index
8235*4882a593Smuzhiyun * to the flag that the trace option file represents, the trace_array
8236*4882a593Smuzhiyun * has a character array of trace_flags_index[], which holds the index
8237*4882a593Smuzhiyun * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8238*4882a593Smuzhiyun * The address of this character array is passed to the flag option file
8239*4882a593Smuzhiyun * read/write callbacks.
8240*4882a593Smuzhiyun *
8241*4882a593Smuzhiyun * In order to extract both the index and the trace_array descriptor,
8242*4882a593Smuzhiyun * get_tr_index() uses the following algorithm.
8243*4882a593Smuzhiyun *
8244*4882a593Smuzhiyun * idx = *ptr;
8245*4882a593Smuzhiyun *
8246*4882a593Smuzhiyun * The pointer passed in is the address of one entry in that index
8247*4882a593Smuzhiyun * array, and each entry holds its own position (remember, index[1] == 1).
8248*4882a593Smuzhiyun *
8249*4882a593Smuzhiyun * Then, to get the trace_array descriptor, subtract that index from
8250*4882a593Smuzhiyun * the pointer, which lands on the start of the index array itself.
8251*4882a593Smuzhiyun *
8252*4882a593Smuzhiyun * ptr - idx == &index[0]
8253*4882a593Smuzhiyun *
8254*4882a593Smuzhiyun * Then a simple container_of() from that pointer gets us to the
8255*4882a593Smuzhiyun * trace_array descriptor.
8256*4882a593Smuzhiyun */
8257*4882a593Smuzhiyun static void get_tr_index(void *data, struct trace_array **ptr,
8258*4882a593Smuzhiyun unsigned int *pindex)
8259*4882a593Smuzhiyun {
8260*4882a593Smuzhiyun *pindex = *(unsigned char *)data;
8261*4882a593Smuzhiyun
8262*4882a593Smuzhiyun *ptr = container_of(data - *pindex, struct trace_array,
8263*4882a593Smuzhiyun trace_flags_index);
8264*4882a593Smuzhiyun }
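
/*
 * Worked example: for the option file of flag bit 5,
 * create_trace_option_core_file() hands &tr->trace_flags_index[5] to the
 * file.  Because init_trace_flags_index() fills the array so that
 * index[i] == i, dereferencing that pointer yields 5, and subtracting 5
 * again lands on &tr->trace_flags_index[0], from which container_of()
 * recovers the owning trace_array.
 */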
8265*4882a593Smuzhiyun
8266*4882a593Smuzhiyun static ssize_t
8267*4882a593Smuzhiyun trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8268*4882a593Smuzhiyun loff_t *ppos)
8269*4882a593Smuzhiyun {
8270*4882a593Smuzhiyun void *tr_index = filp->private_data;
8271*4882a593Smuzhiyun struct trace_array *tr;
8272*4882a593Smuzhiyun unsigned int index;
8273*4882a593Smuzhiyun char *buf;
8274*4882a593Smuzhiyun
8275*4882a593Smuzhiyun get_tr_index(tr_index, &tr, &index);
8276*4882a593Smuzhiyun
8277*4882a593Smuzhiyun if (tr->trace_flags & (1 << index))
8278*4882a593Smuzhiyun buf = "1\n";
8279*4882a593Smuzhiyun else
8280*4882a593Smuzhiyun buf = "0\n";
8281*4882a593Smuzhiyun
8282*4882a593Smuzhiyun return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8283*4882a593Smuzhiyun }
8284*4882a593Smuzhiyun
8285*4882a593Smuzhiyun static ssize_t
8286*4882a593Smuzhiyun trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8287*4882a593Smuzhiyun loff_t *ppos)
8288*4882a593Smuzhiyun {
8289*4882a593Smuzhiyun void *tr_index = filp->private_data;
8290*4882a593Smuzhiyun struct trace_array *tr;
8291*4882a593Smuzhiyun unsigned int index;
8292*4882a593Smuzhiyun unsigned long val;
8293*4882a593Smuzhiyun int ret;
8294*4882a593Smuzhiyun
8295*4882a593Smuzhiyun get_tr_index(tr_index, &tr, &index);
8296*4882a593Smuzhiyun
8297*4882a593Smuzhiyun ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8298*4882a593Smuzhiyun if (ret)
8299*4882a593Smuzhiyun return ret;
8300*4882a593Smuzhiyun
8301*4882a593Smuzhiyun if (val != 0 && val != 1)
8302*4882a593Smuzhiyun return -EINVAL;
8303*4882a593Smuzhiyun
8304*4882a593Smuzhiyun mutex_lock(&event_mutex);
8305*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
8306*4882a593Smuzhiyun ret = set_tracer_flag(tr, 1 << index, val);
8307*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
8308*4882a593Smuzhiyun mutex_unlock(&event_mutex);
8309*4882a593Smuzhiyun
8310*4882a593Smuzhiyun if (ret < 0)
8311*4882a593Smuzhiyun return ret;
8312*4882a593Smuzhiyun
8313*4882a593Smuzhiyun *ppos += cnt;
8314*4882a593Smuzhiyun
8315*4882a593Smuzhiyun return cnt;
8316*4882a593Smuzhiyun }
8317*4882a593Smuzhiyun
8318*4882a593Smuzhiyun static const struct file_operations trace_options_core_fops = {
8319*4882a593Smuzhiyun .open = tracing_open_generic,
8320*4882a593Smuzhiyun .read = trace_options_core_read,
8321*4882a593Smuzhiyun .write = trace_options_core_write,
8322*4882a593Smuzhiyun .llseek = generic_file_llseek,
8323*4882a593Smuzhiyun };
8324*4882a593Smuzhiyun
8325*4882a593Smuzhiyun struct dentry *trace_create_file(const char *name,
8326*4882a593Smuzhiyun umode_t mode,
8327*4882a593Smuzhiyun struct dentry *parent,
8328*4882a593Smuzhiyun void *data,
8329*4882a593Smuzhiyun const struct file_operations *fops)
8330*4882a593Smuzhiyun {
8331*4882a593Smuzhiyun struct dentry *ret;
8332*4882a593Smuzhiyun
8333*4882a593Smuzhiyun ret = tracefs_create_file(name, mode, parent, data, fops);
8334*4882a593Smuzhiyun if (!ret)
8335*4882a593Smuzhiyun pr_warn("Could not create tracefs '%s' entry\n", name);
8336*4882a593Smuzhiyun
8337*4882a593Smuzhiyun return ret;
8338*4882a593Smuzhiyun }
8339*4882a593Smuzhiyun
8340*4882a593Smuzhiyun
8341*4882a593Smuzhiyun static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8342*4882a593Smuzhiyun {
8343*4882a593Smuzhiyun struct dentry *d_tracer;
8344*4882a593Smuzhiyun
8345*4882a593Smuzhiyun if (tr->options)
8346*4882a593Smuzhiyun return tr->options;
8347*4882a593Smuzhiyun
8348*4882a593Smuzhiyun d_tracer = tracing_get_dentry(tr);
8349*4882a593Smuzhiyun if (IS_ERR(d_tracer))
8350*4882a593Smuzhiyun return NULL;
8351*4882a593Smuzhiyun
8352*4882a593Smuzhiyun tr->options = tracefs_create_dir("options", d_tracer);
8353*4882a593Smuzhiyun if (!tr->options) {
8354*4882a593Smuzhiyun pr_warn("Could not create tracefs directory 'options'\n");
8355*4882a593Smuzhiyun return NULL;
8356*4882a593Smuzhiyun }
8357*4882a593Smuzhiyun
8358*4882a593Smuzhiyun return tr->options;
8359*4882a593Smuzhiyun }
8360*4882a593Smuzhiyun
8361*4882a593Smuzhiyun static void
8362*4882a593Smuzhiyun create_trace_option_file(struct trace_array *tr,
8363*4882a593Smuzhiyun struct trace_option_dentry *topt,
8364*4882a593Smuzhiyun struct tracer_flags *flags,
8365*4882a593Smuzhiyun struct tracer_opt *opt)
8366*4882a593Smuzhiyun {
8367*4882a593Smuzhiyun struct dentry *t_options;
8368*4882a593Smuzhiyun
8369*4882a593Smuzhiyun t_options = trace_options_init_dentry(tr);
8370*4882a593Smuzhiyun if (!t_options)
8371*4882a593Smuzhiyun return;
8372*4882a593Smuzhiyun
8373*4882a593Smuzhiyun topt->flags = flags;
8374*4882a593Smuzhiyun topt->opt = opt;
8375*4882a593Smuzhiyun topt->tr = tr;
8376*4882a593Smuzhiyun
8377*4882a593Smuzhiyun topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8378*4882a593Smuzhiyun &trace_options_fops);
8379*4882a593Smuzhiyun
8380*4882a593Smuzhiyun }
8381*4882a593Smuzhiyun
8382*4882a593Smuzhiyun static void
8383*4882a593Smuzhiyun create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8384*4882a593Smuzhiyun {
8385*4882a593Smuzhiyun struct trace_option_dentry *topts;
8386*4882a593Smuzhiyun struct trace_options *tr_topts;
8387*4882a593Smuzhiyun struct tracer_flags *flags;
8388*4882a593Smuzhiyun struct tracer_opt *opts;
8389*4882a593Smuzhiyun int cnt;
8390*4882a593Smuzhiyun int i;
8391*4882a593Smuzhiyun
8392*4882a593Smuzhiyun if (!tracer)
8393*4882a593Smuzhiyun return;
8394*4882a593Smuzhiyun
8395*4882a593Smuzhiyun flags = tracer->flags;
8396*4882a593Smuzhiyun
8397*4882a593Smuzhiyun if (!flags || !flags->opts)
8398*4882a593Smuzhiyun return;
8399*4882a593Smuzhiyun
8400*4882a593Smuzhiyun /*
8401*4882a593Smuzhiyun * If this is an instance, only create flags for tracers
8402*4882a593Smuzhiyun * the instance may have.
8403*4882a593Smuzhiyun */
8404*4882a593Smuzhiyun if (!trace_ok_for_array(tracer, tr))
8405*4882a593Smuzhiyun return;
8406*4882a593Smuzhiyun
8407*4882a593Smuzhiyun for (i = 0; i < tr->nr_topts; i++) {
8408*4882a593Smuzhiyun /* Make sure there are no duplicate flags. */
8409*4882a593Smuzhiyun if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8410*4882a593Smuzhiyun return;
8411*4882a593Smuzhiyun }
8412*4882a593Smuzhiyun
8413*4882a593Smuzhiyun opts = flags->opts;
8414*4882a593Smuzhiyun
8415*4882a593Smuzhiyun for (cnt = 0; opts[cnt].name; cnt++)
8416*4882a593Smuzhiyun ;
8417*4882a593Smuzhiyun
8418*4882a593Smuzhiyun topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8419*4882a593Smuzhiyun if (!topts)
8420*4882a593Smuzhiyun return;
8421*4882a593Smuzhiyun
8422*4882a593Smuzhiyun tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8423*4882a593Smuzhiyun GFP_KERNEL);
8424*4882a593Smuzhiyun if (!tr_topts) {
8425*4882a593Smuzhiyun kfree(topts);
8426*4882a593Smuzhiyun return;
8427*4882a593Smuzhiyun }
8428*4882a593Smuzhiyun
8429*4882a593Smuzhiyun tr->topts = tr_topts;
8430*4882a593Smuzhiyun tr->topts[tr->nr_topts].tracer = tracer;
8431*4882a593Smuzhiyun tr->topts[tr->nr_topts].topts = topts;
8432*4882a593Smuzhiyun tr->nr_topts++;
8433*4882a593Smuzhiyun
8434*4882a593Smuzhiyun for (cnt = 0; opts[cnt].name; cnt++) {
8435*4882a593Smuzhiyun create_trace_option_file(tr, &topts[cnt], flags,
8436*4882a593Smuzhiyun &opts[cnt]);
8437*4882a593Smuzhiyun MEM_FAIL(topts[cnt].entry == NULL,
8438*4882a593Smuzhiyun "Failed to create trace option: %s",
8439*4882a593Smuzhiyun opts[cnt].name);
8440*4882a593Smuzhiyun }
8441*4882a593Smuzhiyun }
8442*4882a593Smuzhiyun
8443*4882a593Smuzhiyun static struct dentry *
8444*4882a593Smuzhiyun create_trace_option_core_file(struct trace_array *tr,
8445*4882a593Smuzhiyun const char *option, long index)
8446*4882a593Smuzhiyun {
8447*4882a593Smuzhiyun struct dentry *t_options;
8448*4882a593Smuzhiyun
8449*4882a593Smuzhiyun t_options = trace_options_init_dentry(tr);
8450*4882a593Smuzhiyun if (!t_options)
8451*4882a593Smuzhiyun return NULL;
8452*4882a593Smuzhiyun
8453*4882a593Smuzhiyun return trace_create_file(option, 0644, t_options,
8454*4882a593Smuzhiyun (void *)&tr->trace_flags_index[index],
8455*4882a593Smuzhiyun &trace_options_core_fops);
8456*4882a593Smuzhiyun }
8457*4882a593Smuzhiyun
8458*4882a593Smuzhiyun static void create_trace_options_dir(struct trace_array *tr)
8459*4882a593Smuzhiyun {
8460*4882a593Smuzhiyun struct dentry *t_options;
8461*4882a593Smuzhiyun bool top_level = tr == &global_trace;
8462*4882a593Smuzhiyun int i;
8463*4882a593Smuzhiyun
8464*4882a593Smuzhiyun t_options = trace_options_init_dentry(tr);
8465*4882a593Smuzhiyun if (!t_options)
8466*4882a593Smuzhiyun return;
8467*4882a593Smuzhiyun
8468*4882a593Smuzhiyun for (i = 0; trace_options[i]; i++) {
8469*4882a593Smuzhiyun if (top_level ||
8470*4882a593Smuzhiyun !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8471*4882a593Smuzhiyun create_trace_option_core_file(tr, trace_options[i], i);
8472*4882a593Smuzhiyun }
8473*4882a593Smuzhiyun }
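
/*
 * The files created above appear under the instance's "options" directory
 * and accept only '0' or '1'; anything else is rejected with -EINVAL.
 * A usage sketch, assuming tracefs is mounted at the usual
 * /sys/kernel/tracing:
 *
 *	echo 1 > /sys/kernel/tracing/options/sym-offset
 *	cat /sys/kernel/tracing/options/sym-offset
 */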
8474*4882a593Smuzhiyun
8475*4882a593Smuzhiyun static ssize_t
8476*4882a593Smuzhiyun rb_simple_read(struct file *filp, char __user *ubuf,
8477*4882a593Smuzhiyun size_t cnt, loff_t *ppos)
8478*4882a593Smuzhiyun {
8479*4882a593Smuzhiyun struct trace_array *tr = filp->private_data;
8480*4882a593Smuzhiyun char buf[64];
8481*4882a593Smuzhiyun int r;
8482*4882a593Smuzhiyun
8483*4882a593Smuzhiyun r = tracer_tracing_is_on(tr);
8484*4882a593Smuzhiyun r = sprintf(buf, "%d\n", r);
8485*4882a593Smuzhiyun
8486*4882a593Smuzhiyun return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8487*4882a593Smuzhiyun }
8488*4882a593Smuzhiyun
8489*4882a593Smuzhiyun static ssize_t
8490*4882a593Smuzhiyun rb_simple_write(struct file *filp, const char __user *ubuf,
8491*4882a593Smuzhiyun size_t cnt, loff_t *ppos)
8492*4882a593Smuzhiyun {
8493*4882a593Smuzhiyun struct trace_array *tr = filp->private_data;
8494*4882a593Smuzhiyun struct trace_buffer *buffer = tr->array_buffer.buffer;
8495*4882a593Smuzhiyun unsigned long val;
8496*4882a593Smuzhiyun int ret;
8497*4882a593Smuzhiyun
8498*4882a593Smuzhiyun ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8499*4882a593Smuzhiyun if (ret)
8500*4882a593Smuzhiyun return ret;
8501*4882a593Smuzhiyun
8502*4882a593Smuzhiyun if (buffer) {
8503*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
8504*4882a593Smuzhiyun if (!!val == tracer_tracing_is_on(tr)) {
8505*4882a593Smuzhiyun val = 0; /* do nothing */
8506*4882a593Smuzhiyun } else if (val) {
8507*4882a593Smuzhiyun tracer_tracing_on(tr);
8508*4882a593Smuzhiyun if (tr->current_trace->start)
8509*4882a593Smuzhiyun tr->current_trace->start(tr);
8510*4882a593Smuzhiyun } else {
8511*4882a593Smuzhiyun tracer_tracing_off(tr);
8512*4882a593Smuzhiyun if (tr->current_trace->stop)
8513*4882a593Smuzhiyun tr->current_trace->stop(tr);
8514*4882a593Smuzhiyun }
8515*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
8516*4882a593Smuzhiyun }
8517*4882a593Smuzhiyun
8518*4882a593Smuzhiyun (*ppos)++;
8519*4882a593Smuzhiyun
8520*4882a593Smuzhiyun return cnt;
8521*4882a593Smuzhiyun }
8522*4882a593Smuzhiyun
8523*4882a593Smuzhiyun static const struct file_operations rb_simple_fops = {
8524*4882a593Smuzhiyun .open = tracing_open_generic_tr,
8525*4882a593Smuzhiyun .read = rb_simple_read,
8526*4882a593Smuzhiyun .write = rb_simple_write,
8527*4882a593Smuzhiyun .release = tracing_release_generic_tr,
8528*4882a593Smuzhiyun .llseek = default_llseek,
8529*4882a593Smuzhiyun };
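
/*
 * rb_simple_fops backs the "tracing_on" file created by
 * init_tracer_tracefs().  Writing a value that matches the current state
 * is a no-op; otherwise the ring buffer is switched on or off and the
 * current tracer's start()/stop() callback, if present, is invoked.
 * A usage sketch, assuming the usual tracefs mount point:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on
 *	echo 1 > /sys/kernel/tracing/tracing_on
 */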
8530*4882a593Smuzhiyun
8531*4882a593Smuzhiyun static ssize_t
8532*4882a593Smuzhiyun buffer_percent_read(struct file *filp, char __user *ubuf,
8533*4882a593Smuzhiyun size_t cnt, loff_t *ppos)
8534*4882a593Smuzhiyun {
8535*4882a593Smuzhiyun struct trace_array *tr = filp->private_data;
8536*4882a593Smuzhiyun char buf[64];
8537*4882a593Smuzhiyun int r;
8538*4882a593Smuzhiyun
8539*4882a593Smuzhiyun r = tr->buffer_percent;
8540*4882a593Smuzhiyun r = sprintf(buf, "%d\n", r);
8541*4882a593Smuzhiyun
8542*4882a593Smuzhiyun return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8543*4882a593Smuzhiyun }
8544*4882a593Smuzhiyun
8545*4882a593Smuzhiyun static ssize_t
8546*4882a593Smuzhiyun buffer_percent_write(struct file *filp, const char __user *ubuf,
8547*4882a593Smuzhiyun size_t cnt, loff_t *ppos)
8548*4882a593Smuzhiyun {
8549*4882a593Smuzhiyun struct trace_array *tr = filp->private_data;
8550*4882a593Smuzhiyun unsigned long val;
8551*4882a593Smuzhiyun int ret;
8552*4882a593Smuzhiyun
8553*4882a593Smuzhiyun ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8554*4882a593Smuzhiyun if (ret)
8555*4882a593Smuzhiyun return ret;
8556*4882a593Smuzhiyun
8557*4882a593Smuzhiyun if (val > 100)
8558*4882a593Smuzhiyun return -EINVAL;
8559*4882a593Smuzhiyun
8560*4882a593Smuzhiyun if (!val)
8561*4882a593Smuzhiyun val = 1;
8562*4882a593Smuzhiyun
8563*4882a593Smuzhiyun tr->buffer_percent = val;
8564*4882a593Smuzhiyun
8565*4882a593Smuzhiyun (*ppos)++;
8566*4882a593Smuzhiyun
8567*4882a593Smuzhiyun return cnt;
8568*4882a593Smuzhiyun }
8569*4882a593Smuzhiyun
8570*4882a593Smuzhiyun static const struct file_operations buffer_percent_fops = {
8571*4882a593Smuzhiyun .open = tracing_open_generic_tr,
8572*4882a593Smuzhiyun .read = buffer_percent_read,
8573*4882a593Smuzhiyun .write = buffer_percent_write,
8574*4882a593Smuzhiyun .release = tracing_release_generic_tr,
8575*4882a593Smuzhiyun .llseek = default_llseek,
8576*4882a593Smuzhiyun };
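
/*
 * "buffer_percent" is the watermark for how full the ring buffer must be
 * before blocked readers are woken.  buffer_percent_write() rejects values
 * above 100 and silently bumps 0 up to 1; the default of 50 is set in
 * init_tracer_tracefs().
 */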
8577*4882a593Smuzhiyun
8578*4882a593Smuzhiyun static struct dentry *trace_instance_dir;
8579*4882a593Smuzhiyun
8580*4882a593Smuzhiyun static void
8581*4882a593Smuzhiyun init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8582*4882a593Smuzhiyun
8583*4882a593Smuzhiyun static int
8584*4882a593Smuzhiyun allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8585*4882a593Smuzhiyun {
8586*4882a593Smuzhiyun enum ring_buffer_flags rb_flags;
8587*4882a593Smuzhiyun
8588*4882a593Smuzhiyun rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8589*4882a593Smuzhiyun
8590*4882a593Smuzhiyun buf->tr = tr;
8591*4882a593Smuzhiyun
8592*4882a593Smuzhiyun buf->buffer = ring_buffer_alloc(size, rb_flags);
8593*4882a593Smuzhiyun if (!buf->buffer)
8594*4882a593Smuzhiyun return -ENOMEM;
8595*4882a593Smuzhiyun
8596*4882a593Smuzhiyun buf->data = alloc_percpu(struct trace_array_cpu);
8597*4882a593Smuzhiyun if (!buf->data) {
8598*4882a593Smuzhiyun ring_buffer_free(buf->buffer);
8599*4882a593Smuzhiyun buf->buffer = NULL;
8600*4882a593Smuzhiyun return -ENOMEM;
8601*4882a593Smuzhiyun }
8602*4882a593Smuzhiyun
8603*4882a593Smuzhiyun /* Allocate the first page for all buffers */
8604*4882a593Smuzhiyun set_buffer_entries(&tr->array_buffer,
8605*4882a593Smuzhiyun ring_buffer_size(tr->array_buffer.buffer, 0));
8606*4882a593Smuzhiyun
8607*4882a593Smuzhiyun return 0;
8608*4882a593Smuzhiyun }
8609*4882a593Smuzhiyun
8610*4882a593Smuzhiyun static int allocate_trace_buffers(struct trace_array *tr, int size)
8611*4882a593Smuzhiyun {
8612*4882a593Smuzhiyun int ret;
8613*4882a593Smuzhiyun
8614*4882a593Smuzhiyun ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8615*4882a593Smuzhiyun if (ret)
8616*4882a593Smuzhiyun return ret;
8617*4882a593Smuzhiyun
8618*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
8619*4882a593Smuzhiyun ret = allocate_trace_buffer(tr, &tr->max_buffer,
8620*4882a593Smuzhiyun allocate_snapshot ? size : 1);
8621*4882a593Smuzhiyun if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8622*4882a593Smuzhiyun ring_buffer_free(tr->array_buffer.buffer);
8623*4882a593Smuzhiyun tr->array_buffer.buffer = NULL;
8624*4882a593Smuzhiyun free_percpu(tr->array_buffer.data);
8625*4882a593Smuzhiyun tr->array_buffer.data = NULL;
8626*4882a593Smuzhiyun return -ENOMEM;
8627*4882a593Smuzhiyun }
8628*4882a593Smuzhiyun tr->allocated_snapshot = allocate_snapshot;
8629*4882a593Smuzhiyun
8630*4882a593Smuzhiyun /*
8631*4882a593Smuzhiyun * Only the top level trace array gets its snapshot allocated
8632*4882a593Smuzhiyun * from the kernel command line.
8633*4882a593Smuzhiyun */
8634*4882a593Smuzhiyun allocate_snapshot = false;
8635*4882a593Smuzhiyun #endif
8636*4882a593Smuzhiyun
8637*4882a593Smuzhiyun return 0;
8638*4882a593Smuzhiyun }
8639*4882a593Smuzhiyun
8640*4882a593Smuzhiyun static void free_trace_buffer(struct array_buffer *buf)
8641*4882a593Smuzhiyun {
8642*4882a593Smuzhiyun if (buf->buffer) {
8643*4882a593Smuzhiyun ring_buffer_free(buf->buffer);
8644*4882a593Smuzhiyun buf->buffer = NULL;
8645*4882a593Smuzhiyun free_percpu(buf->data);
8646*4882a593Smuzhiyun buf->data = NULL;
8647*4882a593Smuzhiyun }
8648*4882a593Smuzhiyun }
8649*4882a593Smuzhiyun
8650*4882a593Smuzhiyun static void free_trace_buffers(struct trace_array *tr)
8651*4882a593Smuzhiyun {
8652*4882a593Smuzhiyun if (!tr)
8653*4882a593Smuzhiyun return;
8654*4882a593Smuzhiyun
8655*4882a593Smuzhiyun free_trace_buffer(&tr->array_buffer);
8656*4882a593Smuzhiyun
8657*4882a593Smuzhiyun #ifdef CONFIG_TRACER_MAX_TRACE
8658*4882a593Smuzhiyun free_trace_buffer(&tr->max_buffer);
8659*4882a593Smuzhiyun #endif
8660*4882a593Smuzhiyun }
8661*4882a593Smuzhiyun
8662*4882a593Smuzhiyun static void init_trace_flags_index(struct trace_array *tr)
8663*4882a593Smuzhiyun {
8664*4882a593Smuzhiyun int i;
8665*4882a593Smuzhiyun
8666*4882a593Smuzhiyun /* Used by the trace options files */
8667*4882a593Smuzhiyun for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8668*4882a593Smuzhiyun tr->trace_flags_index[i] = i;
8669*4882a593Smuzhiyun }
8670*4882a593Smuzhiyun
8671*4882a593Smuzhiyun static void __update_tracer_options(struct trace_array *tr)
8672*4882a593Smuzhiyun {
8673*4882a593Smuzhiyun struct tracer *t;
8674*4882a593Smuzhiyun
8675*4882a593Smuzhiyun for (t = trace_types; t; t = t->next)
8676*4882a593Smuzhiyun add_tracer_options(tr, t);
8677*4882a593Smuzhiyun }
8678*4882a593Smuzhiyun
8679*4882a593Smuzhiyun static void update_tracer_options(struct trace_array *tr)
8680*4882a593Smuzhiyun {
8681*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
8682*4882a593Smuzhiyun tracer_options_updated = true;
8683*4882a593Smuzhiyun __update_tracer_options(tr);
8684*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
8685*4882a593Smuzhiyun }
8686*4882a593Smuzhiyun
8687*4882a593Smuzhiyun /* Must have trace_types_lock held */
8688*4882a593Smuzhiyun struct trace_array *trace_array_find(const char *instance)
8689*4882a593Smuzhiyun {
8690*4882a593Smuzhiyun struct trace_array *tr, *found = NULL;
8691*4882a593Smuzhiyun
8692*4882a593Smuzhiyun list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8693*4882a593Smuzhiyun if (tr->name && strcmp(tr->name, instance) == 0) {
8694*4882a593Smuzhiyun found = tr;
8695*4882a593Smuzhiyun break;
8696*4882a593Smuzhiyun }
8697*4882a593Smuzhiyun }
8698*4882a593Smuzhiyun
8699*4882a593Smuzhiyun return found;
8700*4882a593Smuzhiyun }
8701*4882a593Smuzhiyun
8702*4882a593Smuzhiyun struct trace_array *trace_array_find_get(const char *instance)
8703*4882a593Smuzhiyun {
8704*4882a593Smuzhiyun struct trace_array *tr;
8705*4882a593Smuzhiyun
8706*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
8707*4882a593Smuzhiyun tr = trace_array_find(instance);
8708*4882a593Smuzhiyun if (tr)
8709*4882a593Smuzhiyun tr->ref++;
8710*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
8711*4882a593Smuzhiyun
8712*4882a593Smuzhiyun return tr;
8713*4882a593Smuzhiyun }
8714*4882a593Smuzhiyun
8715*4882a593Smuzhiyun static int trace_array_create_dir(struct trace_array *tr)
8716*4882a593Smuzhiyun {
8717*4882a593Smuzhiyun int ret;
8718*4882a593Smuzhiyun
8719*4882a593Smuzhiyun tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
8720*4882a593Smuzhiyun if (!tr->dir)
8721*4882a593Smuzhiyun return -EINVAL;
8722*4882a593Smuzhiyun
8723*4882a593Smuzhiyun ret = event_trace_add_tracer(tr->dir, tr);
8724*4882a593Smuzhiyun if (ret) {
8725*4882a593Smuzhiyun tracefs_remove(tr->dir);
8726*4882a593Smuzhiyun return ret;
8727*4882a593Smuzhiyun }
8728*4882a593Smuzhiyun
8729*4882a593Smuzhiyun init_tracer_tracefs(tr, tr->dir);
8730*4882a593Smuzhiyun __update_tracer_options(tr);
8731*4882a593Smuzhiyun
8732*4882a593Smuzhiyun return ret;
8733*4882a593Smuzhiyun }
8734*4882a593Smuzhiyun
8735*4882a593Smuzhiyun static struct trace_array *trace_array_create(const char *name)
8736*4882a593Smuzhiyun {
8737*4882a593Smuzhiyun struct trace_array *tr;
8738*4882a593Smuzhiyun int ret;
8739*4882a593Smuzhiyun
8740*4882a593Smuzhiyun ret = -ENOMEM;
8741*4882a593Smuzhiyun tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8742*4882a593Smuzhiyun if (!tr)
8743*4882a593Smuzhiyun return ERR_PTR(ret);
8744*4882a593Smuzhiyun
8745*4882a593Smuzhiyun tr->name = kstrdup(name, GFP_KERNEL);
8746*4882a593Smuzhiyun if (!tr->name)
8747*4882a593Smuzhiyun goto out_free_tr;
8748*4882a593Smuzhiyun
8749*4882a593Smuzhiyun if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8750*4882a593Smuzhiyun goto out_free_tr;
8751*4882a593Smuzhiyun
8752*4882a593Smuzhiyun tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8753*4882a593Smuzhiyun
8754*4882a593Smuzhiyun cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8755*4882a593Smuzhiyun
8756*4882a593Smuzhiyun raw_spin_lock_init(&tr->start_lock);
8757*4882a593Smuzhiyun
8758*4882a593Smuzhiyun tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8759*4882a593Smuzhiyun
8760*4882a593Smuzhiyun tr->current_trace = &nop_trace;
8761*4882a593Smuzhiyun
8762*4882a593Smuzhiyun INIT_LIST_HEAD(&tr->systems);
8763*4882a593Smuzhiyun INIT_LIST_HEAD(&tr->events);
8764*4882a593Smuzhiyun INIT_LIST_HEAD(&tr->hist_vars);
8765*4882a593Smuzhiyun INIT_LIST_HEAD(&tr->err_log);
8766*4882a593Smuzhiyun
8767*4882a593Smuzhiyun if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8768*4882a593Smuzhiyun goto out_free_tr;
8769*4882a593Smuzhiyun
8770*4882a593Smuzhiyun if (ftrace_allocate_ftrace_ops(tr) < 0)
8771*4882a593Smuzhiyun goto out_free_tr;
8772*4882a593Smuzhiyun
8773*4882a593Smuzhiyun ftrace_init_trace_array(tr);
8774*4882a593Smuzhiyun
8775*4882a593Smuzhiyun init_trace_flags_index(tr);
8776*4882a593Smuzhiyun
8777*4882a593Smuzhiyun if (trace_instance_dir) {
8778*4882a593Smuzhiyun ret = trace_array_create_dir(tr);
8779*4882a593Smuzhiyun if (ret)
8780*4882a593Smuzhiyun goto out_free_tr;
8781*4882a593Smuzhiyun } else
8782*4882a593Smuzhiyun __trace_early_add_events(tr);
8783*4882a593Smuzhiyun
8784*4882a593Smuzhiyun list_add(&tr->list, &ftrace_trace_arrays);
8785*4882a593Smuzhiyun
8786*4882a593Smuzhiyun tr->ref++;
8787*4882a593Smuzhiyun
8788*4882a593Smuzhiyun return tr;
8789*4882a593Smuzhiyun
8790*4882a593Smuzhiyun out_free_tr:
8791*4882a593Smuzhiyun ftrace_free_ftrace_ops(tr);
8792*4882a593Smuzhiyun free_trace_buffers(tr);
8793*4882a593Smuzhiyun free_cpumask_var(tr->tracing_cpumask);
8794*4882a593Smuzhiyun kfree(tr->name);
8795*4882a593Smuzhiyun kfree(tr);
8796*4882a593Smuzhiyun
8797*4882a593Smuzhiyun return ERR_PTR(ret);
8798*4882a593Smuzhiyun }
8799*4882a593Smuzhiyun
8800*4882a593Smuzhiyun static int instance_mkdir(const char *name)
8801*4882a593Smuzhiyun {
8802*4882a593Smuzhiyun struct trace_array *tr;
8803*4882a593Smuzhiyun int ret;
8804*4882a593Smuzhiyun
8805*4882a593Smuzhiyun mutex_lock(&event_mutex);
8806*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
8807*4882a593Smuzhiyun
8808*4882a593Smuzhiyun ret = -EEXIST;
8809*4882a593Smuzhiyun if (trace_array_find(name))
8810*4882a593Smuzhiyun goto out_unlock;
8811*4882a593Smuzhiyun
8812*4882a593Smuzhiyun tr = trace_array_create(name);
8813*4882a593Smuzhiyun
8814*4882a593Smuzhiyun ret = PTR_ERR_OR_ZERO(tr);
8815*4882a593Smuzhiyun
8816*4882a593Smuzhiyun out_unlock:
8817*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
8818*4882a593Smuzhiyun mutex_unlock(&event_mutex);
8819*4882a593Smuzhiyun return ret;
8820*4882a593Smuzhiyun }
8821*4882a593Smuzhiyun
8822*4882a593Smuzhiyun /**
8823*4882a593Smuzhiyun * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8824*4882a593Smuzhiyun * @name: The name of the trace array to be looked up/created.
8825*4882a593Smuzhiyun *
8826*4882a593Smuzhiyun * Returns a pointer to the trace array with the given name, or NULL
8827*4882a593Smuzhiyun * if it cannot be created.
8828*4882a593Smuzhiyun *
8829*4882a593Smuzhiyun * NOTE: This function increments the reference counter associated with the
8830*4882a593Smuzhiyun * trace array returned. This makes sure it cannot be freed while in use.
8831*4882a593Smuzhiyun * Use trace_array_put() once the trace array is no longer needed.
8832*4882a593Smuzhiyun * If the trace_array is to be freed, trace_array_destroy() needs to
8833*4882a593Smuzhiyun * be called after the trace_array_put(), or simply let user space delete
8834*4882a593Smuzhiyun * it from the tracefs instances directory. But until the
8835*4882a593Smuzhiyun * trace_array_put() is called, user space can not delete it.
8836*4882a593Smuzhiyun *
8837*4882a593Smuzhiyun */
8838*4882a593Smuzhiyun struct trace_array *trace_array_get_by_name(const char *name)
8839*4882a593Smuzhiyun {
8840*4882a593Smuzhiyun struct trace_array *tr;
8841*4882a593Smuzhiyun
8842*4882a593Smuzhiyun mutex_lock(&event_mutex);
8843*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
8844*4882a593Smuzhiyun
8845*4882a593Smuzhiyun list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8846*4882a593Smuzhiyun if (tr->name && strcmp(tr->name, name) == 0)
8847*4882a593Smuzhiyun goto out_unlock;
8848*4882a593Smuzhiyun }
8849*4882a593Smuzhiyun
8850*4882a593Smuzhiyun tr = trace_array_create(name);
8851*4882a593Smuzhiyun
8852*4882a593Smuzhiyun if (IS_ERR(tr))
8853*4882a593Smuzhiyun tr = NULL;
8854*4882a593Smuzhiyun out_unlock:
8855*4882a593Smuzhiyun if (tr)
8856*4882a593Smuzhiyun tr->ref++;
8857*4882a593Smuzhiyun
8858*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
8859*4882a593Smuzhiyun mutex_unlock(&event_mutex);
8860*4882a593Smuzhiyun return tr;
8861*4882a593Smuzhiyun }
8862*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_array_get_by_name);
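
/*
 * Minimal usage sketch for the API above (illustrative only; the instance
 * name "example" is arbitrary):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("example");
 *	if (!tr)
 *		return -ENOMEM;
 *	...use tr, e.g. with trace_array_printk()...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);	(only if the instance should go away)
 */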
8863*4882a593Smuzhiyun
8864*4882a593Smuzhiyun static int __remove_instance(struct trace_array *tr)
8865*4882a593Smuzhiyun {
8866*4882a593Smuzhiyun int i;
8867*4882a593Smuzhiyun
8868*4882a593Smuzhiyun /* Reference counter for a newly created trace array = 1. */
8869*4882a593Smuzhiyun if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
8870*4882a593Smuzhiyun return -EBUSY;
8871*4882a593Smuzhiyun
8872*4882a593Smuzhiyun list_del(&tr->list);
8873*4882a593Smuzhiyun
8874*4882a593Smuzhiyun /* Disable all the flags that were enabled coming in */
8875*4882a593Smuzhiyun for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8876*4882a593Smuzhiyun if ((1 << i) & ZEROED_TRACE_FLAGS)
8877*4882a593Smuzhiyun set_tracer_flag(tr, 1 << i, 0);
8878*4882a593Smuzhiyun }
8879*4882a593Smuzhiyun
8880*4882a593Smuzhiyun tracing_set_nop(tr);
8881*4882a593Smuzhiyun clear_ftrace_function_probes(tr);
8882*4882a593Smuzhiyun event_trace_del_tracer(tr);
8883*4882a593Smuzhiyun ftrace_clear_pids(tr);
8884*4882a593Smuzhiyun ftrace_destroy_function_files(tr);
8885*4882a593Smuzhiyun tracefs_remove(tr->dir);
8886*4882a593Smuzhiyun free_trace_buffers(tr);
8887*4882a593Smuzhiyun
8888*4882a593Smuzhiyun for (i = 0; i < tr->nr_topts; i++) {
8889*4882a593Smuzhiyun kfree(tr->topts[i].topts);
8890*4882a593Smuzhiyun }
8891*4882a593Smuzhiyun kfree(tr->topts);
8892*4882a593Smuzhiyun
8893*4882a593Smuzhiyun free_cpumask_var(tr->tracing_cpumask);
8894*4882a593Smuzhiyun kfree(tr->name);
8895*4882a593Smuzhiyun kfree(tr);
8896*4882a593Smuzhiyun
8897*4882a593Smuzhiyun return 0;
8898*4882a593Smuzhiyun }
8899*4882a593Smuzhiyun
8900*4882a593Smuzhiyun int trace_array_destroy(struct trace_array *this_tr)
8901*4882a593Smuzhiyun {
8902*4882a593Smuzhiyun struct trace_array *tr;
8903*4882a593Smuzhiyun int ret;
8904*4882a593Smuzhiyun
8905*4882a593Smuzhiyun if (!this_tr)
8906*4882a593Smuzhiyun return -EINVAL;
8907*4882a593Smuzhiyun
8908*4882a593Smuzhiyun mutex_lock(&event_mutex);
8909*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
8910*4882a593Smuzhiyun
8911*4882a593Smuzhiyun ret = -ENODEV;
8912*4882a593Smuzhiyun
8913*4882a593Smuzhiyun /* Make sure the trace array exists before destroying it. */
8914*4882a593Smuzhiyun list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8915*4882a593Smuzhiyun if (tr == this_tr) {
8916*4882a593Smuzhiyun ret = __remove_instance(tr);
8917*4882a593Smuzhiyun break;
8918*4882a593Smuzhiyun }
8919*4882a593Smuzhiyun }
8920*4882a593Smuzhiyun
8921*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
8922*4882a593Smuzhiyun mutex_unlock(&event_mutex);
8923*4882a593Smuzhiyun
8924*4882a593Smuzhiyun return ret;
8925*4882a593Smuzhiyun }
8926*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(trace_array_destroy);
8927*4882a593Smuzhiyun
8928*4882a593Smuzhiyun static int instance_rmdir(const char *name)
8929*4882a593Smuzhiyun {
8930*4882a593Smuzhiyun struct trace_array *tr;
8931*4882a593Smuzhiyun int ret;
8932*4882a593Smuzhiyun
8933*4882a593Smuzhiyun mutex_lock(&event_mutex);
8934*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
8935*4882a593Smuzhiyun
8936*4882a593Smuzhiyun ret = -ENODEV;
8937*4882a593Smuzhiyun tr = trace_array_find(name);
8938*4882a593Smuzhiyun if (tr)
8939*4882a593Smuzhiyun ret = __remove_instance(tr);
8940*4882a593Smuzhiyun
8941*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
8942*4882a593Smuzhiyun mutex_unlock(&event_mutex);
8943*4882a593Smuzhiyun
8944*4882a593Smuzhiyun return ret;
8945*4882a593Smuzhiyun }
8946*4882a593Smuzhiyun
8947*4882a593Smuzhiyun static __init void create_trace_instances(struct dentry *d_tracer)
8948*4882a593Smuzhiyun {
8949*4882a593Smuzhiyun struct trace_array *tr;
8950*4882a593Smuzhiyun
8951*4882a593Smuzhiyun trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8952*4882a593Smuzhiyun instance_mkdir,
8953*4882a593Smuzhiyun instance_rmdir);
8954*4882a593Smuzhiyun if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
8955*4882a593Smuzhiyun return;
8956*4882a593Smuzhiyun
8957*4882a593Smuzhiyun mutex_lock(&event_mutex);
8958*4882a593Smuzhiyun mutex_lock(&trace_types_lock);
8959*4882a593Smuzhiyun
8960*4882a593Smuzhiyun list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8961*4882a593Smuzhiyun if (!tr->name)
8962*4882a593Smuzhiyun continue;
8963*4882a593Smuzhiyun if (MEM_FAIL(trace_array_create_dir(tr) < 0,
8964*4882a593Smuzhiyun "Failed to create instance directory\n"))
8965*4882a593Smuzhiyun break;
8966*4882a593Smuzhiyun }
8967*4882a593Smuzhiyun
8968*4882a593Smuzhiyun mutex_unlock(&trace_types_lock);
8969*4882a593Smuzhiyun mutex_unlock(&event_mutex);
8970*4882a593Smuzhiyun }
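
/*
 * The "instances" directory is special: mkdir and rmdir inside it are
 * routed to instance_mkdir()/instance_rmdir(), so from user space
 * (assuming the usual tracefs mount point)
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *
 * creates a complete trace_array named "foo" with its own buffers and
 * control files, and rmdir tears it down again.
 */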
8971*4882a593Smuzhiyun
8972*4882a593Smuzhiyun static void
8973*4882a593Smuzhiyun init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8974*4882a593Smuzhiyun {
8975*4882a593Smuzhiyun struct trace_event_file *file;
8976*4882a593Smuzhiyun int cpu;
8977*4882a593Smuzhiyun
8978*4882a593Smuzhiyun trace_create_file("available_tracers", 0444, d_tracer,
8979*4882a593Smuzhiyun tr, &show_traces_fops);
8980*4882a593Smuzhiyun
8981*4882a593Smuzhiyun trace_create_file("current_tracer", 0644, d_tracer,
8982*4882a593Smuzhiyun tr, &set_tracer_fops);
8983*4882a593Smuzhiyun
8984*4882a593Smuzhiyun trace_create_file("tracing_cpumask", 0644, d_tracer,
8985*4882a593Smuzhiyun tr, &tracing_cpumask_fops);
8986*4882a593Smuzhiyun
8987*4882a593Smuzhiyun trace_create_file("trace_options", 0644, d_tracer,
8988*4882a593Smuzhiyun tr, &tracing_iter_fops);
8989*4882a593Smuzhiyun
8990*4882a593Smuzhiyun trace_create_file("trace", 0644, d_tracer,
8991*4882a593Smuzhiyun tr, &tracing_fops);
8992*4882a593Smuzhiyun
8993*4882a593Smuzhiyun trace_create_file("trace_pipe", 0444, d_tracer,
8994*4882a593Smuzhiyun tr, &tracing_pipe_fops);
8995*4882a593Smuzhiyun
8996*4882a593Smuzhiyun trace_create_file("buffer_size_kb", 0644, d_tracer,
8997*4882a593Smuzhiyun tr, &tracing_entries_fops);
8998*4882a593Smuzhiyun
8999*4882a593Smuzhiyun trace_create_file("buffer_total_size_kb", 0444, d_tracer,
9000*4882a593Smuzhiyun tr, &tracing_total_entries_fops);
9001*4882a593Smuzhiyun
9002*4882a593Smuzhiyun trace_create_file("free_buffer", 0200, d_tracer,
9003*4882a593Smuzhiyun tr, &tracing_free_buffer_fops);
9004*4882a593Smuzhiyun
9005*4882a593Smuzhiyun trace_create_file("trace_marker", 0220, d_tracer,
9006*4882a593Smuzhiyun tr, &tracing_mark_fops);
9007*4882a593Smuzhiyun
9008*4882a593Smuzhiyun file = __find_event_file(tr, "ftrace", "print");
9009*4882a593Smuzhiyun if (file && file->dir)
9010*4882a593Smuzhiyun trace_create_file("trigger", 0644, file->dir, file,
9011*4882a593Smuzhiyun &event_trigger_fops);
9012*4882a593Smuzhiyun tr->trace_marker_file = file;
9013*4882a593Smuzhiyun
9014*4882a593Smuzhiyun trace_create_file("trace_marker_raw", 0220, d_tracer,
9015*4882a593Smuzhiyun tr, &tracing_mark_raw_fops);
9016*4882a593Smuzhiyun
9017*4882a593Smuzhiyun trace_create_file("trace_clock", 0644, d_tracer, tr,
9018*4882a593Smuzhiyun &trace_clock_fops);
9019*4882a593Smuzhiyun
9020*4882a593Smuzhiyun trace_create_file("tracing_on", 0644, d_tracer,
9021*4882a593Smuzhiyun tr, &rb_simple_fops);
9022*4882a593Smuzhiyun
9023*4882a593Smuzhiyun trace_create_file("timestamp_mode", 0444, d_tracer, tr,
9024*4882a593Smuzhiyun &trace_time_stamp_mode_fops);
9025*4882a593Smuzhiyun
9026*4882a593Smuzhiyun tr->buffer_percent = 50;
9027*4882a593Smuzhiyun
9028*4882a593Smuzhiyun trace_create_file("buffer_percent", 0444, d_tracer,
9029*4882a593Smuzhiyun tr, &buffer_percent_fops);
9030*4882a593Smuzhiyun
9031*4882a593Smuzhiyun create_trace_options_dir(tr);
9032*4882a593Smuzhiyun
9033*4882a593Smuzhiyun #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
9034*4882a593Smuzhiyun trace_create_maxlat_file(tr, d_tracer);
9035*4882a593Smuzhiyun #endif
9036*4882a593Smuzhiyun
9037*4882a593Smuzhiyun if (ftrace_create_function_files(tr, d_tracer))
9038*4882a593Smuzhiyun MEM_FAIL(1, "Could not allocate function filter files");
9039*4882a593Smuzhiyun
9040*4882a593Smuzhiyun #ifdef CONFIG_TRACER_SNAPSHOT
9041*4882a593Smuzhiyun trace_create_file("snapshot", 0644, d_tracer,
9042*4882a593Smuzhiyun tr, &snapshot_fops);
9043*4882a593Smuzhiyun #endif
9044*4882a593Smuzhiyun
9045*4882a593Smuzhiyun trace_create_file("error_log", 0644, d_tracer,
9046*4882a593Smuzhiyun tr, &tracing_err_log_fops);
9047*4882a593Smuzhiyun
9048*4882a593Smuzhiyun for_each_tracing_cpu(cpu)
9049*4882a593Smuzhiyun tracing_init_tracefs_percpu(tr, cpu);
9050*4882a593Smuzhiyun
9051*4882a593Smuzhiyun ftrace_init_tracefs(tr, d_tracer);
9052*4882a593Smuzhiyun }
9053*4882a593Smuzhiyun
9054*4882a593Smuzhiyun #ifndef CONFIG_TRACEFS_DISABLE_AUTOMOUNT
9055*4882a593Smuzhiyun static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9056*4882a593Smuzhiyun {
9057*4882a593Smuzhiyun struct vfsmount *mnt;
9058*4882a593Smuzhiyun struct file_system_type *type;
9059*4882a593Smuzhiyun
9060*4882a593Smuzhiyun /*
9061*4882a593Smuzhiyun * To maintain backward compatibility for tools that mount
9062*4882a593Smuzhiyun * debugfs to get to the tracing facility, tracefs is automatically
9063*4882a593Smuzhiyun * mounted to the debugfs/tracing directory.
9064*4882a593Smuzhiyun */
9065*4882a593Smuzhiyun type = get_fs_type("tracefs");
9066*4882a593Smuzhiyun if (!type)
9067*4882a593Smuzhiyun return NULL;
9068*4882a593Smuzhiyun mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9069*4882a593Smuzhiyun put_filesystem(type);
9070*4882a593Smuzhiyun if (IS_ERR(mnt))
9071*4882a593Smuzhiyun return NULL;
9072*4882a593Smuzhiyun mntget(mnt);
9073*4882a593Smuzhiyun
9074*4882a593Smuzhiyun return mnt;
9075*4882a593Smuzhiyun }
9076*4882a593Smuzhiyun #endif
9077*4882a593Smuzhiyun
9078*4882a593Smuzhiyun /**
9079*4882a593Smuzhiyun * tracing_init_dentry - initialize top level trace array
9080*4882a593Smuzhiyun *
9081*4882a593Smuzhiyun * This is called when creating files or directories in the tracing
9082*4882a593Smuzhiyun * directory. It is called via fs_initcall() by any of the boot up code
9083*4882a593Smuzhiyun * and expects to return the dentry of the top level tracing directory.
9084*4882a593Smuzhiyun */
9085*4882a593Smuzhiyun int tracing_init_dentry(void)
9086*4882a593Smuzhiyun {
9087*4882a593Smuzhiyun struct trace_array *tr = &global_trace;
9088*4882a593Smuzhiyun
9089*4882a593Smuzhiyun if (security_locked_down(LOCKDOWN_TRACEFS)) {
9090*4882a593Smuzhiyun pr_warn("Tracing disabled due to lockdown\n");
9091*4882a593Smuzhiyun return -EPERM;
9092*4882a593Smuzhiyun }
9093*4882a593Smuzhiyun
9094*4882a593Smuzhiyun /* The top level trace array uses NULL as parent */
9095*4882a593Smuzhiyun if (tr->dir)
9096*4882a593Smuzhiyun return 0;
9097*4882a593Smuzhiyun
9098*4882a593Smuzhiyun if (WARN_ON(!tracefs_initialized()))
9099*4882a593Smuzhiyun return -ENODEV;
9100*4882a593Smuzhiyun
9101*4882a593Smuzhiyun #ifndef CONFIG_TRACEFS_DISABLE_AUTOMOUNT
9102*4882a593Smuzhiyun /*
9103*4882a593Smuzhiyun * As there may still be users that expect the tracing
9104*4882a593Smuzhiyun * files to exist in debugfs/tracing, we must automount
9105*4882a593Smuzhiyun * the tracefs file system there, so older tools still
9106*4882a593Smuzhiyun * work with the newer kernel.
9107*4882a593Smuzhiyun */
9108*4882a593Smuzhiyun tr->dir = debugfs_create_automount("tracing", NULL,
9109*4882a593Smuzhiyun trace_automount, NULL);
9110*4882a593Smuzhiyun #else
9111*4882a593Smuzhiyun tr->dir = ERR_PTR(-ENODEV);
9112*4882a593Smuzhiyun #endif
9113*4882a593Smuzhiyun
9114*4882a593Smuzhiyun return 0;
9115*4882a593Smuzhiyun }
9116*4882a593Smuzhiyun
9117*4882a593Smuzhiyun extern struct trace_eval_map *__start_ftrace_eval_maps[];
9118*4882a593Smuzhiyun extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9119*4882a593Smuzhiyun
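/*
 * Eval maps translate the enum/sizeof symbols recorded at build time by
 * TRACE_DEFINE_ENUM() and TRACE_DEFINE_SIZEOF() into numeric values for
 * the event "format" files.  trace_eval_init() feeds in the built-in maps
 * at boot; the module notifier below does the same for loadable modules
 * (and removes them again on unload when CONFIG_TRACE_EVAL_MAP_FILE is set).
 */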
9120*4882a593Smuzhiyun static void __init trace_eval_init(void)
9121*4882a593Smuzhiyun {
9122*4882a593Smuzhiyun int len;
9123*4882a593Smuzhiyun
9124*4882a593Smuzhiyun len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9125*4882a593Smuzhiyun trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9126*4882a593Smuzhiyun }
9127*4882a593Smuzhiyun
9128*4882a593Smuzhiyun #ifdef CONFIG_MODULES
9129*4882a593Smuzhiyun static void trace_module_add_evals(struct module *mod)
9130*4882a593Smuzhiyun {
9131*4882a593Smuzhiyun if (!mod->num_trace_evals)
9132*4882a593Smuzhiyun return;
9133*4882a593Smuzhiyun
9134*4882a593Smuzhiyun /*
9135*4882a593Smuzhiyun * Modules with bad taint do not have events created, do
9136*4882a593Smuzhiyun * not bother with enums either.
9137*4882a593Smuzhiyun */
9138*4882a593Smuzhiyun if (trace_module_has_bad_taint(mod))
9139*4882a593Smuzhiyun return;
9140*4882a593Smuzhiyun
9141*4882a593Smuzhiyun trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9142*4882a593Smuzhiyun }
9143*4882a593Smuzhiyun
9144*4882a593Smuzhiyun #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9145*4882a593Smuzhiyun static void trace_module_remove_evals(struct module *mod)
9146*4882a593Smuzhiyun {
9147*4882a593Smuzhiyun union trace_eval_map_item *map;
9148*4882a593Smuzhiyun union trace_eval_map_item **last = &trace_eval_maps;
9149*4882a593Smuzhiyun
9150*4882a593Smuzhiyun if (!mod->num_trace_evals)
9151*4882a593Smuzhiyun return;
9152*4882a593Smuzhiyun
9153*4882a593Smuzhiyun mutex_lock(&trace_eval_mutex);
9154*4882a593Smuzhiyun
9155*4882a593Smuzhiyun map = trace_eval_maps;
9156*4882a593Smuzhiyun
9157*4882a593Smuzhiyun while (map) {
9158*4882a593Smuzhiyun if (map->head.mod == mod)
9159*4882a593Smuzhiyun break;
9160*4882a593Smuzhiyun map = trace_eval_jmp_to_tail(map);
9161*4882a593Smuzhiyun last = &map->tail.next;
9162*4882a593Smuzhiyun map = map->tail.next;
9163*4882a593Smuzhiyun }
9164*4882a593Smuzhiyun if (!map)
9165*4882a593Smuzhiyun goto out;
9166*4882a593Smuzhiyun
9167*4882a593Smuzhiyun *last = trace_eval_jmp_to_tail(map)->tail.next;
9168*4882a593Smuzhiyun kfree(map);
9169*4882a593Smuzhiyun out:
9170*4882a593Smuzhiyun mutex_unlock(&trace_eval_mutex);
9171*4882a593Smuzhiyun }
9172*4882a593Smuzhiyun #else
9173*4882a593Smuzhiyun static inline void trace_module_remove_evals(struct module *mod) { }
9174*4882a593Smuzhiyun #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9175*4882a593Smuzhiyun
9176*4882a593Smuzhiyun static int trace_module_notify(struct notifier_block *self,
9177*4882a593Smuzhiyun unsigned long val, void *data)
9178*4882a593Smuzhiyun {
9179*4882a593Smuzhiyun struct module *mod = data;
9180*4882a593Smuzhiyun
9181*4882a593Smuzhiyun switch (val) {
9182*4882a593Smuzhiyun case MODULE_STATE_COMING:
9183*4882a593Smuzhiyun trace_module_add_evals(mod);
9184*4882a593Smuzhiyun break;
9185*4882a593Smuzhiyun case MODULE_STATE_GOING:
9186*4882a593Smuzhiyun trace_module_remove_evals(mod);
9187*4882a593Smuzhiyun break;
9188*4882a593Smuzhiyun }
9189*4882a593Smuzhiyun
9190*4882a593Smuzhiyun return NOTIFY_OK;
9191*4882a593Smuzhiyun }
9192*4882a593Smuzhiyun
9193*4882a593Smuzhiyun static struct notifier_block trace_module_nb = {
9194*4882a593Smuzhiyun .notifier_call = trace_module_notify,
9195*4882a593Smuzhiyun .priority = 0,
9196*4882a593Smuzhiyun };
9197*4882a593Smuzhiyun #endif /* CONFIG_MODULES */
9198*4882a593Smuzhiyun
9199*4882a593Smuzhiyun static __init int tracer_init_tracefs(void)
9200*4882a593Smuzhiyun {
9201*4882a593Smuzhiyun int ret;
9202*4882a593Smuzhiyun
9203*4882a593Smuzhiyun trace_access_lock_init();
9204*4882a593Smuzhiyun
9205*4882a593Smuzhiyun ret = tracing_init_dentry();
9206*4882a593Smuzhiyun if (ret)
9207*4882a593Smuzhiyun return 0;
9208*4882a593Smuzhiyun
9209*4882a593Smuzhiyun event_trace_init();
9210*4882a593Smuzhiyun
9211*4882a593Smuzhiyun init_tracer_tracefs(&global_trace, NULL);
9212*4882a593Smuzhiyun ftrace_init_tracefs_toplevel(&global_trace, NULL);
9213*4882a593Smuzhiyun
9214*4882a593Smuzhiyun trace_create_file("tracing_thresh", 0644, NULL,
9215*4882a593Smuzhiyun &global_trace, &tracing_thresh_fops);
9216*4882a593Smuzhiyun
9217*4882a593Smuzhiyun trace_create_file("README", 0444, NULL,
9218*4882a593Smuzhiyun NULL, &tracing_readme_fops);
9219*4882a593Smuzhiyun
9220*4882a593Smuzhiyun trace_create_file("saved_cmdlines", 0444, NULL,
9221*4882a593Smuzhiyun NULL, &tracing_saved_cmdlines_fops);
9222*4882a593Smuzhiyun
9223*4882a593Smuzhiyun trace_create_file("saved_cmdlines_size", 0644, NULL,
9224*4882a593Smuzhiyun NULL, &tracing_saved_cmdlines_size_fops);
9225*4882a593Smuzhiyun
9226*4882a593Smuzhiyun trace_create_file("saved_tgids", 0444, NULL,
9227*4882a593Smuzhiyun NULL, &tracing_saved_tgids_fops);
9228*4882a593Smuzhiyun
9229*4882a593Smuzhiyun trace_eval_init();
9230*4882a593Smuzhiyun
9231*4882a593Smuzhiyun trace_create_eval_file(NULL);
9232*4882a593Smuzhiyun
9233*4882a593Smuzhiyun #ifdef CONFIG_MODULES
9234*4882a593Smuzhiyun register_module_notifier(&trace_module_nb);
9235*4882a593Smuzhiyun #endif
9236*4882a593Smuzhiyun
9237*4882a593Smuzhiyun #ifdef CONFIG_DYNAMIC_FTRACE
9238*4882a593Smuzhiyun trace_create_file("dyn_ftrace_total_info", 0444, NULL,
9239*4882a593Smuzhiyun NULL, &tracing_dyn_info_fops);
9240*4882a593Smuzhiyun #endif
9241*4882a593Smuzhiyun
9242*4882a593Smuzhiyun create_trace_instances(NULL);
9243*4882a593Smuzhiyun
9244*4882a593Smuzhiyun update_tracer_options(&global_trace);
9245*4882a593Smuzhiyun
9246*4882a593Smuzhiyun return 0;
9247*4882a593Smuzhiyun }
9248*4882a593Smuzhiyun
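/*
 * Panic/die notifiers: when ftrace_dump_on_oops is set (typically via the
 * "ftrace_dump_on_oops[=orig_cpu]" boot parameter or the
 * kernel.ftrace_dump_on_oops sysctl), the ring buffer contents are dumped
 * to the console on a panic or oops.  The Android vendor hooks allow a
 * vendor module to suppress the dump.
 */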
9249*4882a593Smuzhiyun static int trace_panic_handler(struct notifier_block *this,
9250*4882a593Smuzhiyun unsigned long event, void *unused)
9251*4882a593Smuzhiyun {
9252*4882a593Smuzhiyun bool ftrace_check = false;
9253*4882a593Smuzhiyun
9254*4882a593Smuzhiyun trace_android_vh_ftrace_oops_enter(&ftrace_check);
9255*4882a593Smuzhiyun
9256*4882a593Smuzhiyun if (ftrace_check)
9257*4882a593Smuzhiyun return NOTIFY_OK;
9258*4882a593Smuzhiyun
9259*4882a593Smuzhiyun if (ftrace_dump_on_oops)
9260*4882a593Smuzhiyun ftrace_dump(ftrace_dump_on_oops);
9261*4882a593Smuzhiyun
9262*4882a593Smuzhiyun trace_android_vh_ftrace_oops_exit(&ftrace_check);
9263*4882a593Smuzhiyun return NOTIFY_OK;
9264*4882a593Smuzhiyun }
9265*4882a593Smuzhiyun
9266*4882a593Smuzhiyun static struct notifier_block trace_panic_notifier = {
9267*4882a593Smuzhiyun .notifier_call = trace_panic_handler,
9268*4882a593Smuzhiyun .next = NULL,
9269*4882a593Smuzhiyun .priority = 150 /* priority: INT_MAX >= x >= 0 */
9270*4882a593Smuzhiyun };
9271*4882a593Smuzhiyun
9272*4882a593Smuzhiyun static int trace_die_handler(struct notifier_block *self,
9273*4882a593Smuzhiyun unsigned long val,
9274*4882a593Smuzhiyun void *data)
9275*4882a593Smuzhiyun {
9276*4882a593Smuzhiyun bool ftrace_check = false;
9277*4882a593Smuzhiyun
9278*4882a593Smuzhiyun trace_android_vh_ftrace_oops_enter(&ftrace_check);
9279*4882a593Smuzhiyun
9280*4882a593Smuzhiyun if (ftrace_check)
9281*4882a593Smuzhiyun return NOTIFY_OK;
9282*4882a593Smuzhiyun
9283*4882a593Smuzhiyun switch (val) {
9284*4882a593Smuzhiyun case DIE_OOPS:
9285*4882a593Smuzhiyun if (ftrace_dump_on_oops)
9286*4882a593Smuzhiyun ftrace_dump(ftrace_dump_on_oops);
9287*4882a593Smuzhiyun break;
9288*4882a593Smuzhiyun default:
9289*4882a593Smuzhiyun break;
9290*4882a593Smuzhiyun }
9291*4882a593Smuzhiyun
9292*4882a593Smuzhiyun trace_android_vh_ftrace_oops_exit(&ftrace_check);
9293*4882a593Smuzhiyun return NOTIFY_OK;
9294*4882a593Smuzhiyun }
9295*4882a593Smuzhiyun
9296*4882a593Smuzhiyun static struct notifier_block trace_die_notifier = {
9297*4882a593Smuzhiyun .notifier_call = trace_die_handler,
9298*4882a593Smuzhiyun .priority = 200
9299*4882a593Smuzhiyun };
9300*4882a593Smuzhiyun
9301*4882a593Smuzhiyun /*
9302*4882a593Smuzhiyun * printk is limited to a maximum of 1024 bytes; we really don't need it that big.
9303*4882a593Smuzhiyun * Nothing should be printing 1000 characters anyway.
9304*4882a593Smuzhiyun */
9305*4882a593Smuzhiyun #define TRACE_MAX_PRINT 1000
9306*4882a593Smuzhiyun
9307*4882a593Smuzhiyun /*
9308*4882a593Smuzhiyun * Define here KERN_TRACE so that we have one place to modify
9309*4882a593Smuzhiyun * it if we decide to change what log level the ftrace dump
9310*4882a593Smuzhiyun * should be at.
9311*4882a593Smuzhiyun */
9312*4882a593Smuzhiyun #define KERN_TRACE KERN_EMERG
9313*4882a593Smuzhiyun
9314*4882a593Smuzhiyun void
9315*4882a593Smuzhiyun trace_printk_seq(struct trace_seq *s)
9316*4882a593Smuzhiyun {
9317*4882a593Smuzhiyun bool dump_printk = true;
9318*4882a593Smuzhiyun
9319*4882a593Smuzhiyun /* Probably should print a warning here. */
9320*4882a593Smuzhiyun if (s->seq.len >= TRACE_MAX_PRINT)
9321*4882a593Smuzhiyun s->seq.len = TRACE_MAX_PRINT;
9322*4882a593Smuzhiyun
9323*4882a593Smuzhiyun /*
9324*4882a593Smuzhiyun * More paranoid code. Although the buffer size is set to
9325*4882a593Smuzhiyun * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9326*4882a593Smuzhiyun * an extra layer of protection.
9327*4882a593Smuzhiyun */
9328*4882a593Smuzhiyun if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9329*4882a593Smuzhiyun s->seq.len = s->seq.size - 1;
9330*4882a593Smuzhiyun
9331*4882a593Smuzhiyun /* should already be NUL-terminated, but we are paranoid. */
9332*4882a593Smuzhiyun s->buffer[s->seq.len] = 0;
9333*4882a593Smuzhiyun
9334*4882a593Smuzhiyun trace_android_vh_ftrace_dump_buffer(s, &dump_printk);
9335*4882a593Smuzhiyun if (dump_printk)
9336*4882a593Smuzhiyun printk(KERN_TRACE "%s", s->buffer);
9337*4882a593Smuzhiyun
9338*4882a593Smuzhiyun trace_seq_init(s);
9339*4882a593Smuzhiyun }
9340*4882a593Smuzhiyun
9341*4882a593Smuzhiyun void trace_init_global_iter(struct trace_iterator *iter)
9342*4882a593Smuzhiyun {
9343*4882a593Smuzhiyun iter->tr = &global_trace;
9344*4882a593Smuzhiyun iter->trace = iter->tr->current_trace;
9345*4882a593Smuzhiyun iter->cpu_file = RING_BUFFER_ALL_CPUS;
9346*4882a593Smuzhiyun iter->array_buffer = &global_trace.array_buffer;
9347*4882a593Smuzhiyun
9348*4882a593Smuzhiyun if (iter->trace && iter->trace->open)
9349*4882a593Smuzhiyun iter->trace->open(iter);
9350*4882a593Smuzhiyun
9351*4882a593Smuzhiyun /* Annotate start of buffers if we had overruns */
9352*4882a593Smuzhiyun if (ring_buffer_overruns(iter->array_buffer->buffer))
9353*4882a593Smuzhiyun iter->iter_flags |= TRACE_FILE_ANNOTATE;
9354*4882a593Smuzhiyun
9355*4882a593Smuzhiyun /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9356*4882a593Smuzhiyun if (trace_clocks[iter->tr->clock_id].in_ns)
9357*4882a593Smuzhiyun iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9358*4882a593Smuzhiyun }
9359*4882a593Smuzhiyun
9360*4882a593Smuzhiyun void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9361*4882a593Smuzhiyun {
9362*4882a593Smuzhiyun /* use static because iter can be a bit big for the stack */
9363*4882a593Smuzhiyun static struct trace_iterator iter;
9364*4882a593Smuzhiyun static atomic_t dump_running;
9365*4882a593Smuzhiyun struct trace_array *tr = &global_trace;
9366*4882a593Smuzhiyun unsigned int old_userobj;
9367*4882a593Smuzhiyun unsigned long flags;
9368*4882a593Smuzhiyun int cnt = 0, cpu;
9369*4882a593Smuzhiyun bool ftrace_check = false;
9370*4882a593Smuzhiyun unsigned long size;
9371*4882a593Smuzhiyun
9372*4882a593Smuzhiyun /* Only allow one dump user at a time. */
9373*4882a593Smuzhiyun if (atomic_inc_return(&dump_running) != 1) {
9374*4882a593Smuzhiyun atomic_dec(&dump_running);
9375*4882a593Smuzhiyun return;
9376*4882a593Smuzhiyun }
9377*4882a593Smuzhiyun
9378*4882a593Smuzhiyun /*
9379*4882a593Smuzhiyun * Always turn off tracing when we dump.
9380*4882a593Smuzhiyun * We don't need to show trace output of what happens
9381*4882a593Smuzhiyun * between multiple crashes.
9382*4882a593Smuzhiyun *
9383*4882a593Smuzhiyun * If the user does a sysrq-z, then they can re-enable
9384*4882a593Smuzhiyun * tracing with echo 1 > tracing_on.
9385*4882a593Smuzhiyun */
9386*4882a593Smuzhiyun tracing_off();
9387*4882a593Smuzhiyun
9388*4882a593Smuzhiyun local_irq_save(flags);
9389*4882a593Smuzhiyun printk_nmi_direct_enter();
9390*4882a593Smuzhiyun
9391*4882a593Smuzhiyun /* Simulate the iterator */
9392*4882a593Smuzhiyun trace_init_global_iter(&iter);
9393*4882a593Smuzhiyun /* Can not use kmalloc for iter.temp */
9394*4882a593Smuzhiyun iter.temp = static_temp_buf;
9395*4882a593Smuzhiyun iter.temp_size = STATIC_TEMP_BUF_SIZE;
9396*4882a593Smuzhiyun
9397*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
9398*4882a593Smuzhiyun atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9399*4882a593Smuzhiyun size = ring_buffer_size(iter.array_buffer->buffer, cpu);
9400*4882a593Smuzhiyun trace_android_vh_ftrace_size_check(size, &ftrace_check);
9401*4882a593Smuzhiyun }
9402*4882a593Smuzhiyun
9403*4882a593Smuzhiyun old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9404*4882a593Smuzhiyun
9405*4882a593Smuzhiyun /* don't look at user memory in panic mode */
9406*4882a593Smuzhiyun tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9407*4882a593Smuzhiyun
9408*4882a593Smuzhiyun if (ftrace_check)
9409*4882a593Smuzhiyun goto out_enable;
9410*4882a593Smuzhiyun
9411*4882a593Smuzhiyun switch (oops_dump_mode) {
9412*4882a593Smuzhiyun case DUMP_ALL:
9413*4882a593Smuzhiyun iter.cpu_file = RING_BUFFER_ALL_CPUS;
9414*4882a593Smuzhiyun break;
9415*4882a593Smuzhiyun case DUMP_ORIG:
9416*4882a593Smuzhiyun iter.cpu_file = raw_smp_processor_id();
9417*4882a593Smuzhiyun break;
9418*4882a593Smuzhiyun case DUMP_NONE:
9419*4882a593Smuzhiyun goto out_enable;
9420*4882a593Smuzhiyun default:
9421*4882a593Smuzhiyun printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9422*4882a593Smuzhiyun iter.cpu_file = RING_BUFFER_ALL_CPUS;
9423*4882a593Smuzhiyun }
9424*4882a593Smuzhiyun
9425*4882a593Smuzhiyun printk(KERN_TRACE "Dumping ftrace buffer:\n");
9426*4882a593Smuzhiyun
9427*4882a593Smuzhiyun /* Did function tracer already get disabled? */
9428*4882a593Smuzhiyun if (ftrace_is_dead()) {
9429*4882a593Smuzhiyun printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9430*4882a593Smuzhiyun printk("# MAY BE MISSING FUNCTION EVENTS\n");
9431*4882a593Smuzhiyun }
9432*4882a593Smuzhiyun
9433*4882a593Smuzhiyun /*
9434*4882a593Smuzhiyun * We need to stop all tracing on all CPUs to read
9435*4882a593Smuzhiyun * the next buffer. This is a bit expensive, but it is
9436*4882a593Smuzhiyun * not done often. We dump everything we can read,
9437*4882a593Smuzhiyun * and then release the locks again.
9438*4882a593Smuzhiyun */
9439*4882a593Smuzhiyun
9440*4882a593Smuzhiyun while (!trace_empty(&iter)) {
9441*4882a593Smuzhiyun ftrace_check = true;
9442*4882a593Smuzhiyun
9443*4882a593Smuzhiyun if (!cnt)
9444*4882a593Smuzhiyun printk(KERN_TRACE "---------------------------------\n");
9445*4882a593Smuzhiyun
9446*4882a593Smuzhiyun cnt++;
9447*4882a593Smuzhiyun
9448*4882a593Smuzhiyun trace_iterator_reset(&iter);
9449*4882a593Smuzhiyun trace_android_vh_ftrace_format_check(&ftrace_check);
9450*4882a593Smuzhiyun if (ftrace_check)
9451*4882a593Smuzhiyun iter.iter_flags |= TRACE_FILE_LAT_FMT;
9452*4882a593Smuzhiyun
9453*4882a593Smuzhiyun if (trace_find_next_entry_inc(&iter) != NULL) {
9454*4882a593Smuzhiyun int ret;
9455*4882a593Smuzhiyun
9456*4882a593Smuzhiyun ret = print_trace_line(&iter);
9457*4882a593Smuzhiyun if (ret != TRACE_TYPE_NO_CONSUME)
9458*4882a593Smuzhiyun trace_consume(&iter);
9459*4882a593Smuzhiyun }
9460*4882a593Smuzhiyun touch_nmi_watchdog();
9461*4882a593Smuzhiyun
9462*4882a593Smuzhiyun trace_printk_seq(&iter.seq);
9463*4882a593Smuzhiyun }
9464*4882a593Smuzhiyun
9465*4882a593Smuzhiyun if (!cnt)
9466*4882a593Smuzhiyun printk(KERN_TRACE " (ftrace buffer empty)\n");
9467*4882a593Smuzhiyun else
9468*4882a593Smuzhiyun printk(KERN_TRACE "---------------------------------\n");
9469*4882a593Smuzhiyun
9470*4882a593Smuzhiyun out_enable:
9471*4882a593Smuzhiyun tr->trace_flags |= old_userobj;
9472*4882a593Smuzhiyun
9473*4882a593Smuzhiyun for_each_tracing_cpu(cpu) {
9474*4882a593Smuzhiyun atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9475*4882a593Smuzhiyun }
9476*4882a593Smuzhiyun atomic_dec(&dump_running);
9477*4882a593Smuzhiyun printk_nmi_direct_exit();
9478*4882a593Smuzhiyun local_irq_restore(flags);
9479*4882a593Smuzhiyun }
9480*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(ftrace_dump);
9481*4882a593Smuzhiyun
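/*
 * A minimal usage sketch, not part of this file: ftrace_dump() is exported
 * so that other code (in addition to the ftrace_dump_on_oops facility) can
 * dump the ring buffers when the system is dying.  The notifier block,
 * callback name and header placement below are illustrative assumptions.
 */
#if 0
#include <linux/ftrace.h>
#include <linux/notifier.h>

static int example_panic_dump(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	/* DUMP_ALL dumps every CPU's buffer; DUMP_ORIG dumps only the local CPU. */
	ftrace_dump(DUMP_ALL);
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_panic_dump,
};

/* In module init:
 *	atomic_notifier_chain_register(&panic_notifier_list, &example_panic_nb);
 */
#endif
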
9482*4882a593Smuzhiyun int trace_run_command(const char *buf, int (*createfn)(int, char **))
9483*4882a593Smuzhiyun {
9484*4882a593Smuzhiyun char **argv;
9485*4882a593Smuzhiyun int argc, ret;
9486*4882a593Smuzhiyun
9487*4882a593Smuzhiyun argc = 0;
9488*4882a593Smuzhiyun ret = 0;
9489*4882a593Smuzhiyun argv = argv_split(GFP_KERNEL, buf, &argc);
9490*4882a593Smuzhiyun if (!argv)
9491*4882a593Smuzhiyun return -ENOMEM;
9492*4882a593Smuzhiyun
9493*4882a593Smuzhiyun if (argc)
9494*4882a593Smuzhiyun ret = createfn(argc, argv);
9495*4882a593Smuzhiyun
9496*4882a593Smuzhiyun argv_free(argv);
9497*4882a593Smuzhiyun
9498*4882a593Smuzhiyun return ret;
9499*4882a593Smuzhiyun }
9500*4882a593Smuzhiyun
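/*
 * A minimal sketch of the createfn contract, not part of this file:
 * trace_run_command() splits the buffer with argv_split() and hands the
 * words to the callback, so a command string such as "p:myprobe do_sys_open"
 * arrives as argc == 2 with argv[0] == "p:myprobe".  The function name and
 * command string below are illustrative only.
 */
#if 0
static int example_createfn(int argc, char **argv)
{
	int i;

	if (argc < 1)
		return -EINVAL;

	for (i = 0; i < argc; i++)
		pr_info("arg[%d] = '%s'\n", i, argv[i]);

	return 0;
}

/* ret = trace_run_command("p:myprobe do_sys_open", example_createfn); */
#endif
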
9501*4882a593Smuzhiyun #define WRITE_BUFSIZE 4096
9502*4882a593Smuzhiyun
9503*4882a593Smuzhiyun ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9504*4882a593Smuzhiyun size_t count, loff_t *ppos,
9505*4882a593Smuzhiyun int (*createfn)(int, char **))
9506*4882a593Smuzhiyun {
9507*4882a593Smuzhiyun char *kbuf, *buf, *tmp;
9508*4882a593Smuzhiyun int ret = 0;
9509*4882a593Smuzhiyun size_t done = 0;
9510*4882a593Smuzhiyun size_t size;
9511*4882a593Smuzhiyun
9512*4882a593Smuzhiyun kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9513*4882a593Smuzhiyun if (!kbuf)
9514*4882a593Smuzhiyun return -ENOMEM;
9515*4882a593Smuzhiyun
9516*4882a593Smuzhiyun while (done < count) {
9517*4882a593Smuzhiyun size = count - done;
9518*4882a593Smuzhiyun
9519*4882a593Smuzhiyun if (size >= WRITE_BUFSIZE)
9520*4882a593Smuzhiyun size = WRITE_BUFSIZE - 1;
9521*4882a593Smuzhiyun
9522*4882a593Smuzhiyun if (copy_from_user(kbuf, buffer + done, size)) {
9523*4882a593Smuzhiyun ret = -EFAULT;
9524*4882a593Smuzhiyun goto out;
9525*4882a593Smuzhiyun }
9526*4882a593Smuzhiyun kbuf[size] = '\0';
9527*4882a593Smuzhiyun buf = kbuf;
9528*4882a593Smuzhiyun do {
9529*4882a593Smuzhiyun tmp = strchr(buf, '\n');
9530*4882a593Smuzhiyun if (tmp) {
9531*4882a593Smuzhiyun *tmp = '\0';
9532*4882a593Smuzhiyun size = tmp - buf + 1;
9533*4882a593Smuzhiyun } else {
9534*4882a593Smuzhiyun size = strlen(buf);
9535*4882a593Smuzhiyun if (done + size < count) {
9536*4882a593Smuzhiyun if (buf != kbuf)
9537*4882a593Smuzhiyun break;
9538*4882a593Smuzhiyun /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9539*4882a593Smuzhiyun pr_warn("Line length is too long: Should be less than %d\n",
9540*4882a593Smuzhiyun WRITE_BUFSIZE - 2);
9541*4882a593Smuzhiyun ret = -EINVAL;
9542*4882a593Smuzhiyun goto out;
9543*4882a593Smuzhiyun }
9544*4882a593Smuzhiyun }
9545*4882a593Smuzhiyun done += size;
9546*4882a593Smuzhiyun
9547*4882a593Smuzhiyun /* Remove comments */
9548*4882a593Smuzhiyun tmp = strchr(buf, '#');
9549*4882a593Smuzhiyun
9550*4882a593Smuzhiyun if (tmp)
9551*4882a593Smuzhiyun *tmp = '\0';
9552*4882a593Smuzhiyun
9553*4882a593Smuzhiyun ret = trace_run_command(buf, createfn);
9554*4882a593Smuzhiyun if (ret)
9555*4882a593Smuzhiyun goto out;
9556*4882a593Smuzhiyun buf += size;
9557*4882a593Smuzhiyun
9558*4882a593Smuzhiyun } while (done < count);
9559*4882a593Smuzhiyun }
9560*4882a593Smuzhiyun ret = done;
9561*4882a593Smuzhiyun
9562*4882a593Smuzhiyun out:
9563*4882a593Smuzhiyun kfree(kbuf);
9564*4882a593Smuzhiyun
9565*4882a593Smuzhiyun return ret;
9566*4882a593Smuzhiyun }
9567*4882a593Smuzhiyun
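/*
 * A minimal sketch, not part of this file, of how a tracefs control file
 * typically forwards writes to trace_parse_run_command(), which splits the
 * input into newline-terminated commands, strips '#' comments and invokes
 * the callback once per command.  The handler, file_operations instance and
 * example_createfn() callback names are illustrative only.
 */
#if 0
static ssize_t example_probes_write(struct file *file,
				    const char __user *buffer,
				    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       example_createfn);
}

static const struct file_operations example_probes_fops = {
	.owner	= THIS_MODULE,
	.write	= example_probes_write,
};
#endif
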
9568*4882a593Smuzhiyun __init static int tracer_alloc_buffers(void)
9569*4882a593Smuzhiyun {
9570*4882a593Smuzhiyun int ring_buf_size;
9571*4882a593Smuzhiyun int ret = -ENOMEM;
9572*4882a593Smuzhiyun
9573*4882a593Smuzhiyun
9574*4882a593Smuzhiyun if (security_locked_down(LOCKDOWN_TRACEFS)) {
9575*4882a593Smuzhiyun pr_warn("Tracing disabled due to lockdown\n");
9576*4882a593Smuzhiyun return -EPERM;
9577*4882a593Smuzhiyun }
9578*4882a593Smuzhiyun
9579*4882a593Smuzhiyun /*
9580*4882a593Smuzhiyun * Make sure we don't accidentally add more trace options
9581*4882a593Smuzhiyun * than we have bits for.
9582*4882a593Smuzhiyun */
9583*4882a593Smuzhiyun BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9584*4882a593Smuzhiyun
9585*4882a593Smuzhiyun if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9586*4882a593Smuzhiyun goto out;
9587*4882a593Smuzhiyun
9588*4882a593Smuzhiyun if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9589*4882a593Smuzhiyun goto out_free_buffer_mask;
9590*4882a593Smuzhiyun
9591*4882a593Smuzhiyun /* Only allocate trace_printk buffers if a trace_printk exists */
9592*4882a593Smuzhiyun if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9593*4882a593Smuzhiyun /* Must be called before global_trace.buffer is allocated */
9594*4882a593Smuzhiyun trace_printk_init_buffers();
9595*4882a593Smuzhiyun
9596*4882a593Smuzhiyun /* To save memory, keep the ring buffer size to its minimum */
9597*4882a593Smuzhiyun if (ring_buffer_expanded)
9598*4882a593Smuzhiyun ring_buf_size = trace_buf_size;
9599*4882a593Smuzhiyun else
9600*4882a593Smuzhiyun ring_buf_size = 1;
9601*4882a593Smuzhiyun
9602*4882a593Smuzhiyun cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9603*4882a593Smuzhiyun cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9604*4882a593Smuzhiyun
9605*4882a593Smuzhiyun raw_spin_lock_init(&global_trace.start_lock);
9606*4882a593Smuzhiyun
9607*4882a593Smuzhiyun /*
9608*4882a593Smuzhiyun * The prepare callback allocates some memory for the ring buffer. We
9609*4882a593Smuzhiyun * don't free the buffer if the CPU goes down. If we were to free
9610*4882a593Smuzhiyun * the buffer, the user would lose any trace that was in it.
9611*4882a593Smuzhiyun * The memory will be removed once the "instance" is removed.
9612*4882a593Smuzhiyun */
9613*4882a593Smuzhiyun ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9614*4882a593Smuzhiyun "trace/RB:preapre", trace_rb_cpu_prepare,
9615*4882a593Smuzhiyun NULL);
9616*4882a593Smuzhiyun if (ret < 0)
9617*4882a593Smuzhiyun goto out_free_cpumask;
9618*4882a593Smuzhiyun /* Used for event triggers */
9619*4882a593Smuzhiyun ret = -ENOMEM;
9620*4882a593Smuzhiyun temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9621*4882a593Smuzhiyun if (!temp_buffer)
9622*4882a593Smuzhiyun goto out_rm_hp_state;
9623*4882a593Smuzhiyun
9624*4882a593Smuzhiyun if (trace_create_savedcmd() < 0)
9625*4882a593Smuzhiyun goto out_free_temp_buffer;
9626*4882a593Smuzhiyun
9627*4882a593Smuzhiyun /* TODO: make the number of buffers hot pluggable with CPUS */
9628*4882a593Smuzhiyun if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9629*4882a593Smuzhiyun MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9630*4882a593Smuzhiyun goto out_free_savedcmd;
9631*4882a593Smuzhiyun }
9632*4882a593Smuzhiyun
9633*4882a593Smuzhiyun if (global_trace.buffer_disabled)
9634*4882a593Smuzhiyun tracing_off();
9635*4882a593Smuzhiyun
9636*4882a593Smuzhiyun if (trace_boot_clock) {
9637*4882a593Smuzhiyun ret = tracing_set_clock(&global_trace, trace_boot_clock);
9638*4882a593Smuzhiyun if (ret < 0)
9639*4882a593Smuzhiyun pr_warn("Trace clock %s not defined, going back to default\n",
9640*4882a593Smuzhiyun trace_boot_clock);
9641*4882a593Smuzhiyun }
9642*4882a593Smuzhiyun
9643*4882a593Smuzhiyun /*
9644*4882a593Smuzhiyun * register_tracer() might reference current_trace, so it
9645*4882a593Smuzhiyun * needs to be set before we register anything. This is
9646*4882a593Smuzhiyun * just a bootstrap of current_trace anyway.
9647*4882a593Smuzhiyun */
9648*4882a593Smuzhiyun global_trace.current_trace = &nop_trace;
9649*4882a593Smuzhiyun
9650*4882a593Smuzhiyun global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9651*4882a593Smuzhiyun
9652*4882a593Smuzhiyun ftrace_init_global_array_ops(&global_trace);
9653*4882a593Smuzhiyun
9654*4882a593Smuzhiyun init_trace_flags_index(&global_trace);
9655*4882a593Smuzhiyun
9656*4882a593Smuzhiyun register_tracer(&nop_trace);
9657*4882a593Smuzhiyun
9658*4882a593Smuzhiyun /* Function tracing may start here (via kernel command line) */
9659*4882a593Smuzhiyun init_function_trace();
9660*4882a593Smuzhiyun
9661*4882a593Smuzhiyun /* All seems OK, enable tracing */
9662*4882a593Smuzhiyun tracing_disabled = 0;
9663*4882a593Smuzhiyun
9664*4882a593Smuzhiyun atomic_notifier_chain_register(&panic_notifier_list,
9665*4882a593Smuzhiyun &trace_panic_notifier);
9666*4882a593Smuzhiyun
9667*4882a593Smuzhiyun register_die_notifier(&trace_die_notifier);
9668*4882a593Smuzhiyun
9669*4882a593Smuzhiyun global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9670*4882a593Smuzhiyun
9671*4882a593Smuzhiyun INIT_LIST_HEAD(&global_trace.systems);
9672*4882a593Smuzhiyun INIT_LIST_HEAD(&global_trace.events);
9673*4882a593Smuzhiyun INIT_LIST_HEAD(&global_trace.hist_vars);
9674*4882a593Smuzhiyun INIT_LIST_HEAD(&global_trace.err_log);
9675*4882a593Smuzhiyun list_add(&global_trace.list, &ftrace_trace_arrays);
9676*4882a593Smuzhiyun
9677*4882a593Smuzhiyun apply_trace_boot_options();
9678*4882a593Smuzhiyun
9679*4882a593Smuzhiyun register_snapshot_cmd();
9680*4882a593Smuzhiyun
9681*4882a593Smuzhiyun return 0;
9682*4882a593Smuzhiyun
9683*4882a593Smuzhiyun out_free_savedcmd:
9684*4882a593Smuzhiyun free_saved_cmdlines_buffer(savedcmd);
9685*4882a593Smuzhiyun out_free_temp_buffer:
9686*4882a593Smuzhiyun ring_buffer_free(temp_buffer);
9687*4882a593Smuzhiyun out_rm_hp_state:
9688*4882a593Smuzhiyun cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9689*4882a593Smuzhiyun out_free_cpumask:
9690*4882a593Smuzhiyun free_cpumask_var(global_trace.tracing_cpumask);
9691*4882a593Smuzhiyun out_free_buffer_mask:
9692*4882a593Smuzhiyun free_cpumask_var(tracing_buffer_mask);
9693*4882a593Smuzhiyun out:
9694*4882a593Smuzhiyun return ret;
9695*4882a593Smuzhiyun }
9696*4882a593Smuzhiyun
9697*4882a593Smuzhiyun void __init early_trace_init(void)
9698*4882a593Smuzhiyun {
9699*4882a593Smuzhiyun if (tracepoint_printk) {
9700*4882a593Smuzhiyun tracepoint_print_iter =
9701*4882a593Smuzhiyun kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9702*4882a593Smuzhiyun if (MEM_FAIL(!tracepoint_print_iter,
9703*4882a593Smuzhiyun "Failed to allocate trace iterator\n"))
9704*4882a593Smuzhiyun tracepoint_printk = 0;
9705*4882a593Smuzhiyun else
9706*4882a593Smuzhiyun static_key_enable(&tracepoint_printk_key.key);
9707*4882a593Smuzhiyun }
9708*4882a593Smuzhiyun tracer_alloc_buffers();
9709*4882a593Smuzhiyun }
9710*4882a593Smuzhiyun
9711*4882a593Smuzhiyun void __init trace_init(void)
9712*4882a593Smuzhiyun {
9713*4882a593Smuzhiyun trace_event_init();
9714*4882a593Smuzhiyun }
9715*4882a593Smuzhiyun
9716*4882a593Smuzhiyun __init static int clear_boot_tracer(void)
9717*4882a593Smuzhiyun {
9718*4882a593Smuzhiyun /*
9719*4882a593Smuzhiyun * The default bootup tracer name points into an init section.
9720*4882a593Smuzhiyun * This function is called at late_initcall time. If the boot
9721*4882a593Smuzhiyun * tracer was never found, clear the pointer out to prevent a
9722*4882a593Smuzhiyun * later registration from accessing memory that is
9723*4882a593Smuzhiyun * about to be freed.
9724*4882a593Smuzhiyun */
9725*4882a593Smuzhiyun if (!default_bootup_tracer)
9726*4882a593Smuzhiyun return 0;
9727*4882a593Smuzhiyun
9728*4882a593Smuzhiyun printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9729*4882a593Smuzhiyun default_bootup_tracer);
9730*4882a593Smuzhiyun default_bootup_tracer = NULL;
9731*4882a593Smuzhiyun
9732*4882a593Smuzhiyun return 0;
9733*4882a593Smuzhiyun }
9734*4882a593Smuzhiyun
9735*4882a593Smuzhiyun fs_initcall(tracer_init_tracefs);
9736*4882a593Smuzhiyun late_initcall_sync(clear_boot_tracer);
9737*4882a593Smuzhiyun
9738*4882a593Smuzhiyun #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9739*4882a593Smuzhiyun __init static int tracing_set_default_clock(void)
9740*4882a593Smuzhiyun {
9741*4882a593Smuzhiyun /* sched_clock_stable() is determined in late_initcall */
9742*4882a593Smuzhiyun if (!trace_boot_clock && !sched_clock_stable()) {
9743*4882a593Smuzhiyun if (security_locked_down(LOCKDOWN_TRACEFS)) {
9744*4882a593Smuzhiyun pr_warn("Can not set tracing clock due to lockdown\n");
9745*4882a593Smuzhiyun return -EPERM;
9746*4882a593Smuzhiyun }
9747*4882a593Smuzhiyun
9748*4882a593Smuzhiyun printk(KERN_WARNING
9749*4882a593Smuzhiyun "Unstable clock detected, switching default tracing clock to \"global\"\n"
9750*4882a593Smuzhiyun "If you want to keep using the local clock, then add:\n"
9751*4882a593Smuzhiyun " \"trace_clock=local\"\n"
9752*4882a593Smuzhiyun "on the kernel command line\n");
9753*4882a593Smuzhiyun tracing_set_clock(&global_trace, "global");
9754*4882a593Smuzhiyun }
9755*4882a593Smuzhiyun
9756*4882a593Smuzhiyun return 0;
9757*4882a593Smuzhiyun }
9758*4882a593Smuzhiyun late_initcall_sync(tracing_set_default_clock);
9759*4882a593Smuzhiyun #endif
9760