/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBPERF_INTERNAL_EVSEL_H
#define __LIBPERF_INTERNAL_EVSEL_H

#include <linux/types.h>
#include <linux/perf_event.h>
#include <stdbool.h>
#include <sys/types.h>

struct perf_cpu_map;
struct perf_thread_map;
struct xyarray;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	struct hlist_node	 node;
	u64			 id;
	struct perf_evsel	*evsel;
	/*
	 * 'idx' will be used for AUX area sampling. A sample will have AUX area
	 * data that will be queued for decoding, where there are separate
	 * queues for each CPU (per-cpu tracing) or task (per-thread tracing).
	 * The sample ID can be used to lookup 'idx' which is effectively the
	 * queue number.
	 */
	int			 idx;
	int			 cpu;
	pid_t			 tid;

	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
	u64			 period;
};

struct perf_evsel {
	struct list_head	 node;
	struct perf_event_attr	 attr;
	struct perf_cpu_map	*cpus;
	struct perf_cpu_map	*own_cpus;
	struct perf_thread_map	*threads;
	struct xyarray		*fd;
	struct xyarray		*sample_id;
	u64			*id;
	u32			 ids;

	/* parse modifier helper */
	int			 nr_members;
	bool			 system_wide;
};

void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel);
void perf_evsel__free_fd(struct perf_evsel *evsel);
int perf_evsel__read_size(struct perf_evsel *evsel);
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__free_id(struct perf_evsel *evsel);

#endif /* __LIBPERF_INTERNAL_EVSEL_H */
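
/*
 * Minimal usage sketch, assuming a caller that has already filled in a
 * struct perf_event_attr and knows its ncpus/nthreads dimensions.  This only
 * illustrates the intended pairing of the allocation/teardown helpers
 * declared above; the actual open/read calls are elided.
 *
 *	struct perf_evsel evsel;
 *
 *	perf_evsel__init(&evsel, &attr);
 *	if (perf_evsel__alloc_fd(&evsel, ncpus, nthreads) < 0)
 *		return -ENOMEM;
 *	if (perf_evsel__alloc_id(&evsel, ncpus, nthreads) < 0)
 *		return -ENOMEM;
 *
 *	// ... open the event per cpu/thread, read counts, apply filters ...
 *
 *	perf_evsel__close_fd(&evsel);
 *	perf_evsel__free_id(&evsel);
 *	perf_evsel__free_fd(&evsel);
 */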