1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __PERF_EVSEL_H
3*4882a593Smuzhiyun #define __PERF_EVSEL_H 1
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #include <linux/list.h>
6*4882a593Smuzhiyun #include <stdbool.h>
7*4882a593Smuzhiyun #include <sys/types.h>
8*4882a593Smuzhiyun #include <linux/perf_event.h>
9*4882a593Smuzhiyun #include <linux/types.h>
10*4882a593Smuzhiyun #include <internal/evsel.h>
11*4882a593Smuzhiyun #include <perf/evsel.h>
12*4882a593Smuzhiyun #include "symbol_conf.h"
13*4882a593Smuzhiyun #include <internal/cpumap.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun struct bpf_object;
16*4882a593Smuzhiyun struct cgroup;
17*4882a593Smuzhiyun struct perf_counts;
18*4882a593Smuzhiyun struct perf_stat_evsel;
19*4882a593Smuzhiyun union perf_event;
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun typedef int (evsel__sb_cb_t)(union perf_event *event, void *data);
22*4882a593Smuzhiyun
/*
 * Events implemented by the perf tool itself (e.g. duration_time) rather
 * than opened via the kernel's perf_event_open().
 */
enum perf_tool_event {
	PERF_TOOL_NONE = 0,
	PERF_TOOL_DURATION_TIME = 1,
};
27*4882a593Smuzhiyun
/** struct evsel - event selector
 *
 * @evlist - evlist this evsel is in, if it is in one.
 * @core - libperf evsel object
 * @name - Can be set to retain the original event name passed by the user,
 *	so that when showing results in tools such as 'perf stat', we
 *	show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *	PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
 *	struct perf_record_sample
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
 *	PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
 *	is used there is an id sample appended to non-sample events
 * @priv: And what is in its containing unnamed union are tool specific
 */
struct evsel {
	struct perf_evsel core;
	struct evlist *evlist;
	off_t id_offset;
	/* Insertion index; group position is derived from it, see evsel__group_idx(). */
	int idx;
	int id_pos;
	int is_pos;
	/* Size of the fixed-size sample fields; cf. __evsel__sample_size(). */
	unsigned int sample_size;

	/*
	 * These fields can be set in the parse-events code or similar.
	 * Please check evsel__clone() to copy them properly so that
	 * they can be released properly.
	 */
	struct {
		char *name;
		char *group_name;
		const char *pmu_name;
		struct tep_event *tp_format;
		char *filter;
		unsigned long max_events;
		double scale;
		const char *unit;
		struct cgroup *cgrp;
		enum perf_tool_event tool_event;
		/* parse modifier helper */
		int exclude_GH;
		int sample_read;
		bool snapshot;
		bool per_pkg;
		bool percore;
		bool precise_max;
		bool use_uncore_alias;
		bool is_libpfm_event;
		bool auto_merge_stats;
		bool collect_stat;
		bool weak_group;
		int bpf_fd;
		struct bpf_object *bpf_obj;
	};

	/*
	 * metric fields are similar, but needs more care as they can have
	 * references to other metric (evsel).
	 */
	const char * metric_expr;
	const char * metric_name;
	struct evsel **metric_events;
	struct evsel *metric_leader;

	void *handler;
	struct perf_counts *counts;
	struct perf_counts *prev_raw_counts;
	unsigned long nr_events_printed;
	struct perf_stat_evsel *stats;
	void *priv;
	u64 db_id;
	bool uniquified_name;
	bool supported;
	bool needs_swap;
	bool disabled;
	bool no_aux_samples;
	bool immediate;
	bool tracking;
	bool ignore_missing_thread;
	bool forced_leader;
	bool cmdline_group_boundary;
	bool merged_stat;
	bool reset_group;
	bool errored;
	unsigned long *per_pkg_mask;
	/* Group leader; points back to this evsel for a stand-alone event (see evsel__is_group_leader()). */
	struct evsel *leader;
	struct list_head config_terms;
	int err;
	int cpu_iter;
	/* Side-band event callback and its opaque user data. */
	struct {
		evsel__sb_cb_t *cb;
		void *data;
	} side_band;
	/*
	 * For reporting purposes, an evsel sample can have a callchain
	 * synthesized from AUX area data. Keep track of synthesized sample
	 * types here. Note, the recorded sample_type cannot be changed because
	 * it is needed to continue to parse events.
	 * See also evsel__has_callchain().
	 */
	__u64 synth_sample_type;
};
131*4882a593Smuzhiyun
/*
 * Flags for perf_event_attr features the running kernel does not support.
 * NOTE(review): presumably each flag is latched after a failed
 * perf_event_open() so subsequent opens can fall back — confirm in the
 * open/fallback path in evsel.c.
 */
struct perf_missing_features {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
	bool lbr_flags;
	bool write_backward;
	bool group_read;
	bool ksymbol;
	bool bpf;
	bool aux_output;
	bool branch_hw_idx;
	bool cgroup;
};
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun extern struct perf_missing_features perf_missing_features;
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun struct perf_cpu_map;
152*4882a593Smuzhiyun struct target;
153*4882a593Smuzhiyun struct thread_map;
154*4882a593Smuzhiyun struct record_opts;
155*4882a593Smuzhiyun
evsel__cpus(struct evsel * evsel)156*4882a593Smuzhiyun static inline struct perf_cpu_map *evsel__cpus(struct evsel *evsel)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun return perf_evsel__cpus(&evsel->core);
159*4882a593Smuzhiyun }
160*4882a593Smuzhiyun
evsel__nr_cpus(struct evsel * evsel)161*4882a593Smuzhiyun static inline int evsel__nr_cpus(struct evsel *evsel)
162*4882a593Smuzhiyun {
163*4882a593Smuzhiyun return evsel__cpus(evsel)->nr;
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun void perf_counts_values__scale(struct perf_counts_values *count,
167*4882a593Smuzhiyun bool scale, s8 *pscaled);
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
170*4882a593Smuzhiyun struct perf_counts_values *count);
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun int evsel__object_config(size_t object_size,
173*4882a593Smuzhiyun int (*init)(struct evsel *evsel),
174*4882a593Smuzhiyun void (*fini)(struct evsel *evsel));
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun struct perf_pmu *evsel__find_pmu(struct evsel *evsel);
177*4882a593Smuzhiyun bool evsel__is_aux_event(struct evsel *evsel);
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx);
180*4882a593Smuzhiyun
/* Allocate a new evsel for @attr; shorthand for evsel__new_idx() at index 0. */
static inline struct evsel *evsel__new(struct perf_event_attr *attr)
{
	const int first_idx = 0;

	return evsel__new_idx(attr, first_idx);
}
185*4882a593Smuzhiyun
186*4882a593Smuzhiyun struct evsel *evsel__clone(struct evsel *orig);
187*4882a593Smuzhiyun struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx);
188*4882a593Smuzhiyun
/*
 * Allocate a tracepoint evsel for sys:name at index 0.
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
static inline struct evsel *evsel__newtp(const char *sys, const char *name)
{
	const int first_idx = 0;

	return evsel__newtp_idx(sys, name, first_idx);
}
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun struct evsel *evsel__new_cycles(bool precise);
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun struct tep_event *event_format__new(const char *sys, const char *name);
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
202*4882a593Smuzhiyun void evsel__exit(struct evsel *evsel);
203*4882a593Smuzhiyun void evsel__delete(struct evsel *evsel);
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun struct callchain_param;
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun void evsel__config(struct evsel *evsel, struct record_opts *opts,
208*4882a593Smuzhiyun struct callchain_param *callchain);
209*4882a593Smuzhiyun void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
210*4882a593Smuzhiyun struct callchain_param *callchain);
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun int __evsel__sample_size(u64 sample_type);
213*4882a593Smuzhiyun void evsel__calc_id_pos(struct evsel *evsel);
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun bool evsel__is_cache_op_valid(u8 type, u8 op);
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun #define EVSEL__MAX_ALIASES 8
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun extern const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
220*4882a593Smuzhiyun extern const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES];
221*4882a593Smuzhiyun extern const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
222*4882a593Smuzhiyun extern const char *evsel__hw_names[PERF_COUNT_HW_MAX];
223*4882a593Smuzhiyun extern const char *evsel__sw_names[PERF_COUNT_SW_MAX];
224*4882a593Smuzhiyun int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
225*4882a593Smuzhiyun const char *evsel__name(struct evsel *evsel);
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun const char *evsel__group_name(struct evsel *evsel);
228*4882a593Smuzhiyun int evsel__group_desc(struct evsel *evsel, char *buf, size_t size);
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun void __evsel__set_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);
231*4882a593Smuzhiyun void __evsel__reset_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);
232*4882a593Smuzhiyun
/* Set/clear a PERF_SAMPLE_* bit by its short name, e.g. evsel__set_sample_bit(evsel, TIME). */
#define evsel__set_sample_bit(evsel, bit) \
	__evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define evsel__reset_sample_bit(evsel, bit) \
	__evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);
240*4882a593Smuzhiyun
241*4882a593Smuzhiyun int evsel__set_filter(struct evsel *evsel, const char *filter);
242*4882a593Smuzhiyun int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
243*4882a593Smuzhiyun int evsel__append_addr_filter(struct evsel *evsel, const char *filter);
244*4882a593Smuzhiyun int evsel__enable_cpu(struct evsel *evsel, int cpu);
245*4882a593Smuzhiyun int evsel__enable(struct evsel *evsel);
246*4882a593Smuzhiyun int evsel__disable(struct evsel *evsel);
247*4882a593Smuzhiyun int evsel__disable_cpu(struct evsel *evsel, int cpu);
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu);
250*4882a593Smuzhiyun int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads);
251*4882a593Smuzhiyun int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
252*4882a593Smuzhiyun struct perf_thread_map *threads);
253*4882a593Smuzhiyun void evsel__close(struct evsel *evsel);
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun struct perf_sample;
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name);
258*4882a593Smuzhiyun u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name);
259*4882a593Smuzhiyun
/* Fetch a tracepoint string field out of @sample's raw data. */
static inline char *evsel__strval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
	void *raw = evsel__rawptr(evsel, sample, name);

	return raw;
}
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun struct tep_format_field;
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, bool needs_swap);
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun struct tep_format_field *evsel__field(struct evsel *evsel, const char *name);
270*4882a593Smuzhiyun
/* Does @evsel have type PERF_TYPE_##t and config PERF_COUNT_##c? */
#define evsel__match(evsel, t, c) \
	(evsel->core.attr.type == PERF_TYPE_##t && \
	 evsel->core.attr.config == PERF_COUNT_##c)
274*4882a593Smuzhiyun
evsel__match2(struct evsel * e1,struct evsel * e2)275*4882a593Smuzhiyun static inline bool evsel__match2(struct evsel *e1, struct evsel *e2)
276*4882a593Smuzhiyun {
277*4882a593Smuzhiyun return (e1->core.attr.type == e2->core.attr.type) &&
278*4882a593Smuzhiyun (e1->core.attr.config == e2->core.attr.config);
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun int evsel__read_counter(struct evsel *evsel, int cpu, int thread);
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale);
284*4882a593Smuzhiyun
/**
 * evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread)
{
	const bool scale = false;

	return __evsel__read_on_cpu(evsel, cpu, thread, scale);
}
296*4882a593Smuzhiyun
/**
 * evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu, int thread)
{
	const bool scale = true;

	return __evsel__read_on_cpu(evsel, cpu, thread, scale);
}
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
310*4882a593Smuzhiyun struct perf_sample *sample);
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
313*4882a593Smuzhiyun u64 *timestamp);
314*4882a593Smuzhiyun
evsel__next(struct evsel * evsel)315*4882a593Smuzhiyun static inline struct evsel *evsel__next(struct evsel *evsel)
316*4882a593Smuzhiyun {
317*4882a593Smuzhiyun return list_entry(evsel->core.node.next, struct evsel, core.node);
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun
evsel__prev(struct evsel * evsel)320*4882a593Smuzhiyun static inline struct evsel *evsel__prev(struct evsel *evsel)
321*4882a593Smuzhiyun {
322*4882a593Smuzhiyun return list_entry(evsel->core.node.prev, struct evsel, core.node);
323*4882a593Smuzhiyun }
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun /**
326*4882a593Smuzhiyun * evsel__is_group_leader - Return whether given evsel is a leader event
327*4882a593Smuzhiyun *
328*4882a593Smuzhiyun * @evsel - evsel selector to be tested
329*4882a593Smuzhiyun *
330*4882a593Smuzhiyun * Return %true if @evsel is a group leader or a stand-alone event
331*4882a593Smuzhiyun */
evsel__is_group_leader(const struct evsel * evsel)332*4882a593Smuzhiyun static inline bool evsel__is_group_leader(const struct evsel *evsel)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun return evsel->leader == evsel;
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun
337*4882a593Smuzhiyun /**
338*4882a593Smuzhiyun * evsel__is_group_event - Return whether given evsel is a group event
339*4882a593Smuzhiyun *
340*4882a593Smuzhiyun * @evsel - evsel selector to be tested
341*4882a593Smuzhiyun *
342*4882a593Smuzhiyun * Return %true iff event group view is enabled and @evsel is a actual group
343*4882a593Smuzhiyun * leader which has other members in the group
344*4882a593Smuzhiyun */
evsel__is_group_event(struct evsel * evsel)345*4882a593Smuzhiyun static inline bool evsel__is_group_event(struct evsel *evsel)
346*4882a593Smuzhiyun {
347*4882a593Smuzhiyun if (!symbol_conf.event_group)
348*4882a593Smuzhiyun return false;
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun return evsel__is_group_leader(evsel) && evsel->core.nr_members > 1;
351*4882a593Smuzhiyun }
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun bool evsel__is_function_event(struct evsel *evsel);
354*4882a593Smuzhiyun
evsel__is_bpf_output(struct evsel * evsel)355*4882a593Smuzhiyun static inline bool evsel__is_bpf_output(struct evsel *evsel)
356*4882a593Smuzhiyun {
357*4882a593Smuzhiyun return evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
358*4882a593Smuzhiyun }
359*4882a593Smuzhiyun
evsel__is_clock(struct evsel * evsel)360*4882a593Smuzhiyun static inline bool evsel__is_clock(struct evsel *evsel)
361*4882a593Smuzhiyun {
362*4882a593Smuzhiyun return evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
363*4882a593Smuzhiyun evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize);
367*4882a593Smuzhiyun int evsel__open_strerror(struct evsel *evsel, struct target *target,
368*4882a593Smuzhiyun int err, char *msg, size_t size);
369*4882a593Smuzhiyun
evsel__group_idx(struct evsel * evsel)370*4882a593Smuzhiyun static inline int evsel__group_idx(struct evsel *evsel)
371*4882a593Smuzhiyun {
372*4882a593Smuzhiyun return evsel->idx - evsel->leader->idx;
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun
/*
 * Iterates group WITHOUT the leader. Relies on group members being linked
 * directly after their leader on the evlist: the walk stops at the first
 * evsel whose leader pointer differs.
 */
#define for_each_group_member(_evsel, _leader) \
for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
     (_evsel) && (_evsel)->leader == (_leader); \
     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))

/* Iterates group WITH the leader (leader first, then each member). */
#define for_each_group_evsel(_evsel, _leader) \
for ((_evsel) = _leader; \
     (_evsel) && (_evsel)->leader == (_leader); \
     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
386*4882a593Smuzhiyun
evsel__has_branch_callstack(const struct evsel * evsel)387*4882a593Smuzhiyun static inline bool evsel__has_branch_callstack(const struct evsel *evsel)
388*4882a593Smuzhiyun {
389*4882a593Smuzhiyun return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun
evsel__has_branch_hw_idx(const struct evsel * evsel)392*4882a593Smuzhiyun static inline bool evsel__has_branch_hw_idx(const struct evsel *evsel)
393*4882a593Smuzhiyun {
394*4882a593Smuzhiyun return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun
evsel__has_callchain(const struct evsel * evsel)397*4882a593Smuzhiyun static inline bool evsel__has_callchain(const struct evsel *evsel)
398*4882a593Smuzhiyun {
399*4882a593Smuzhiyun /*
400*4882a593Smuzhiyun * For reporting purposes, an evsel sample can have a recorded callchain
401*4882a593Smuzhiyun * or a callchain synthesized from AUX area data.
402*4882a593Smuzhiyun */
403*4882a593Smuzhiyun return evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN ||
404*4882a593Smuzhiyun evsel->synth_sample_type & PERF_SAMPLE_CALLCHAIN;
405*4882a593Smuzhiyun }
406*4882a593Smuzhiyun
evsel__has_br_stack(const struct evsel * evsel)407*4882a593Smuzhiyun static inline bool evsel__has_br_stack(const struct evsel *evsel)
408*4882a593Smuzhiyun {
409*4882a593Smuzhiyun /*
410*4882a593Smuzhiyun * For reporting purposes, an evsel sample can have a recorded branch
411*4882a593Smuzhiyun * stack or a branch stack synthesized from AUX area data.
412*4882a593Smuzhiyun */
413*4882a593Smuzhiyun return evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK ||
414*4882a593Smuzhiyun evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
415*4882a593Smuzhiyun }
416*4882a593Smuzhiyun
evsel__is_dummy_event(struct evsel * evsel)417*4882a593Smuzhiyun static inline bool evsel__is_dummy_event(struct evsel *evsel)
418*4882a593Smuzhiyun {
419*4882a593Smuzhiyun return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
420*4882a593Smuzhiyun (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun struct perf_env *evsel__env(struct evsel *evsel);
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
426*4882a593Smuzhiyun #endif /* __PERF_EVSEL_H */
427