Lines Matching refs:evsel
19 void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr) in perf_evsel__init() argument
21 INIT_LIST_HEAD(&evsel->node); in perf_evsel__init()
22 evsel->attr = *attr; in perf_evsel__init()
27 struct perf_evsel *evsel = zalloc(sizeof(*evsel)); in perf_evsel__new() local
29 if (evsel != NULL) in perf_evsel__new()
30 perf_evsel__init(evsel, attr); in perf_evsel__new()
32 return evsel; in perf_evsel__new()
35 void perf_evsel__delete(struct perf_evsel *evsel) in perf_evsel__delete() argument
37 free(evsel); in perf_evsel__delete()
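
These matches appear to come from libperf's evsel.c (tools/lib/perf): perf_evsel__new() zallocs a perf_evsel and hands it to perf_evsel__init(), which copies the caller's perf_event_attr and initialises the list node, and perf_evsel__delete() simply frees it again. A minimal allocation sketch, assuming the installed <perf/evsel.h> header and an illustrative software CPU-clock attr (neither appears in the listing):

    #include <linux/perf_event.h>
    #include <perf/evsel.h>

    static struct perf_evsel *cpu_clock_evsel(void)
    {
            struct perf_event_attr attr = {
                    .type     = PERF_TYPE_SOFTWARE,
                    .config   = PERF_COUNT_SW_CPU_CLOCK,
                    .disabled = 1,   /* created disabled; enabled explicitly later */
            };

            /* perf_evsel__new() zallocs the evsel and runs perf_evsel__init(),
             * which copies *attr into evsel->attr and inits evsel->node. */
            return perf_evsel__new(&attr);
    }

    /* Teardown, once the fds are closed: perf_evsel__delete(evsel); */
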
42 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_fd() argument
44 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); in perf_evsel__alloc_fd()
46 if (evsel->fd) { in perf_evsel__alloc_fd()
50 FD(evsel, cpu, thread) = -1; in perf_evsel__alloc_fd()
55 return evsel->fd != NULL ? 0 : -ENOMEM; in perf_evsel__alloc_fd()
66 int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, in perf_evsel__open() argument
95 if (evsel->fd == NULL && in perf_evsel__open()
96 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) in perf_evsel__open()
103 fd = sys_perf_event_open(&evsel->attr, in perf_evsel__open()
110 FD(evsel, cpu, thread) = fd; in perf_evsel__open()
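
perf_evsel__open() lazily sizes the fd matrix to cpus->nr x threads->nr (perf_evsel__alloc_fd() fills every slot with -1) and then stores one sys_perf_event_open() descriptor per (cpu, thread) pair. A sketch of calling it, assuming the libperf map constructors named below, which are not part of this listing (perf_cpu_map__new(NULL) for all online CPUs, perf_thread_map__new_dummy() plus perf_thread_map__set_pid() for the calling process):

    #include <errno.h>
    #include <perf/cpumap.h>
    #include <perf/threadmap.h>
    #include <perf/evsel.h>

    static int open_for_this_process(struct perf_evsel *evsel)
    {
            struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);   /* assumed: all online CPUs */
            struct perf_thread_map *threads = perf_thread_map__new_dummy();
            int err;

            if (!cpus || !threads) {
                    perf_cpu_map__put(cpus);
                    perf_thread_map__put(threads);
                    return -ENOMEM;
            }

            perf_thread_map__set_pid(threads, 0, 0);  /* pid 0: measure the calling process */

            /* Allocates evsel->fd on first use and opens one perf_event fd
             * per (cpu, thread) slot. */
            err = perf_evsel__open(evsel, cpus, threads);

            perf_cpu_map__put(cpus);
            perf_thread_map__put(threads);
            return err;
    }
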
117 static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu) in perf_evsel__close_fd_cpu() argument
121 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { in perf_evsel__close_fd_cpu()
122 if (FD(evsel, cpu, thread) >= 0) in perf_evsel__close_fd_cpu()
123 close(FD(evsel, cpu, thread)); in perf_evsel__close_fd_cpu()
124 FD(evsel, cpu, thread) = -1; in perf_evsel__close_fd_cpu()
128 void perf_evsel__close_fd(struct perf_evsel *evsel) in perf_evsel__close_fd() argument
132 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) in perf_evsel__close_fd()
133 perf_evsel__close_fd_cpu(evsel, cpu); in perf_evsel__close_fd()
136 void perf_evsel__free_fd(struct perf_evsel *evsel) in perf_evsel__free_fd() argument
138 xyarray__delete(evsel->fd); in perf_evsel__free_fd()
139 evsel->fd = NULL; in perf_evsel__free_fd()
142 void perf_evsel__close(struct perf_evsel *evsel) in perf_evsel__close() argument
144 if (evsel->fd == NULL) in perf_evsel__close()
147 perf_evsel__close_fd(evsel); in perf_evsel__close()
148 perf_evsel__free_fd(evsel); in perf_evsel__close()
151 void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu) in perf_evsel__close_cpu() argument
153 if (evsel->fd == NULL) in perf_evsel__close_cpu()
156 perf_evsel__close_fd_cpu(evsel, cpu); in perf_evsel__close_cpu()
159 int perf_evsel__read_size(struct perf_evsel *evsel) in perf_evsel__read_size() argument
161 u64 read_format = evsel->attr.read_format; in perf_evsel__read_size()
176 nr = evsel->nr_members; in perf_evsel__read_size()
184 int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, in perf_evsel__read() argument
187 size_t size = perf_evsel__read_size(evsel); in perf_evsel__read()
191 if (FD(evsel, cpu, thread) < 0) in perf_evsel__read()
194 if (readn(FD(evsel, cpu, thread), count->values, size) <= 0) in perf_evsel__read()
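
perf_evsel__read_size() computes how many bytes a single counter read occupies for the configured attr.read_format (including group members via evsel->nr_members), and perf_evsel__read() bails out for a never-opened slot (fd still -1) before pulling exactly that many bytes into count->values. A read sketch, assuming libperf's struct perf_counts_values exposes val/ena/run (the struct itself is not in the listing):

    #include <stdio.h>
    #include <perf/evsel.h>

    static void print_slot(struct perf_evsel *evsel, int cpu, int thread)
    {
            struct perf_counts_values counts;

            if (perf_evsel__read(evsel, cpu, thread, &counts))
                    return;

            /* ena/run are only meaningful when attr.read_format requested
             * PERF_FORMAT_TOTAL_TIME_ENABLED / PERF_FORMAT_TOTAL_TIME_RUNNING. */
            printf("cpu %d thread %d: val %llu ena %llu run %llu\n",
                   cpu, thread,
                   (unsigned long long)counts.val,
                   (unsigned long long)counts.ena,
                   (unsigned long long)counts.run);
    }
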
200 static int perf_evsel__run_ioctl(struct perf_evsel *evsel, in perf_evsel__run_ioctl() argument
206 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { in perf_evsel__run_ioctl()
207 int fd = FD(evsel, cpu, thread), in perf_evsel__run_ioctl()
217 int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu) in perf_evsel__enable_cpu() argument
219 return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu); in perf_evsel__enable_cpu()
222 int perf_evsel__enable(struct perf_evsel *evsel) in perf_evsel__enable() argument
227 for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++) in perf_evsel__enable()
228 err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i); in perf_evsel__enable()
232 int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu) in perf_evsel__disable_cpu() argument
234 return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu); in perf_evsel__disable_cpu()
237 int perf_evsel__disable(struct perf_evsel *evsel) in perf_evsel__disable() argument
242 for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++) in perf_evsel__disable()
243 err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i); in perf_evsel__disable()
247 int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter) in perf_evsel__apply_filter() argument
251 for (i = 0; i < evsel->cpus->nr && !err; i++) in perf_evsel__apply_filter()
252 err = perf_evsel__run_ioctl(evsel, in perf_evsel__apply_filter()
258 struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) in perf_evsel__cpus() argument
260 return evsel->cpus; in perf_evsel__cpus()
263 struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel) in perf_evsel__threads() argument
265 return evsel->threads; in perf_evsel__threads()
268 struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel) in perf_evsel__attr() argument
270 return &evsel->attr; in perf_evsel__attr()
273 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_id() argument
278 if (evsel->system_wide) in perf_evsel__alloc_id()
281 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); in perf_evsel__alloc_id()
282 if (evsel->sample_id == NULL) in perf_evsel__alloc_id()
285 evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); in perf_evsel__alloc_id()
286 if (evsel->id == NULL) { in perf_evsel__alloc_id()
287 xyarray__delete(evsel->sample_id); in perf_evsel__alloc_id()
288 evsel->sample_id = NULL; in perf_evsel__alloc_id()
295 void perf_evsel__free_id(struct perf_evsel *evsel) in perf_evsel__free_id() argument
297 xyarray__delete(evsel->sample_id); in perf_evsel__free_id()
298 evsel->sample_id = NULL; in perf_evsel__free_id()
299 zfree(&evsel->id); in perf_evsel__free_id()
300 evsel->ids = 0; in perf_evsel__free_id()
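
perf_evsel__alloc_id() and perf_evsel__free_id() only manage the per-(cpu, thread) perf_sample_id table and the id array; they are bookkeeping helpers rather than part of the open/enable/read/close flow above. Tying the public calls from this listing together, a hedged end-to-end counting sketch (the thread-map helpers, the NULL-cpus shorthand and the perf_counts_values layout are assumptions, as noted earlier):

    #include <stdio.h>
    #include <unistd.h>
    #include <linux/perf_event.h>
    #include <perf/threadmap.h>
    #include <perf/evsel.h>

    int main(void)
    {
            struct perf_event_attr attr = {
                    .type     = PERF_TYPE_SOFTWARE,
                    .config   = PERF_COUNT_SW_TASK_CLOCK,
                    .disabled = 1,
            };
            struct perf_thread_map *threads = perf_thread_map__new_dummy();
            struct perf_evsel *evsel = perf_evsel__new(&attr);
            struct perf_counts_values counts;

            if (!threads || !evsel)
                    return 1;

            perf_thread_map__set_pid(threads, 0, 0);      /* this process */

            if (perf_evsel__open(evsel, NULL, threads))   /* NULL cpus: assumed "no CPU constraint" */
                    return 1;

            perf_evsel__enable(evsel);     /* PERF_EVENT_IOC_ENABLE across every fd */
            usleep(100 * 1000);            /* the workload being measured */
            perf_evsel__disable(evsel);    /* PERF_EVENT_IOC_DISABLE across every fd */

            if (perf_evsel__read(evsel, 0, 0, &counts) == 0)
                    printf("task clock: %llu ns\n", (unsigned long long)counts.val);

            perf_evsel__close(evsel);      /* closes the fds and frees the xyarray */
            perf_evsel__delete(evsel);
            perf_thread_map__put(threads);
            return 0;
    }
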