Lines Matching refs:evlist (symbol references to evlist in the libperf evlist implementation, tools/lib/perf/evlist.c)
27 void perf_evlist__init(struct perf_evlist *evlist) in perf_evlist__init() argument
32 INIT_HLIST_HEAD(&evlist->heads[i]); in perf_evlist__init()
33 INIT_LIST_HEAD(&evlist->entries); in perf_evlist__init()
34 evlist->nr_entries = 0; in perf_evlist__init()
35 fdarray__init(&evlist->pollfd, 64); in perf_evlist__init()
38 static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, in __perf_evlist__propagate_maps() argument
45 if (!evsel->own_cpus || evlist->has_user_cpus) { in __perf_evlist__propagate_maps()
47 evsel->cpus = perf_cpu_map__get(evlist->cpus); in __perf_evlist__propagate_maps()
48 } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) { in __perf_evlist__propagate_maps()
50 evsel->cpus = perf_cpu_map__get(evlist->cpus); in __perf_evlist__propagate_maps()
57 evsel->threads = perf_thread_map__get(evlist->threads); in __perf_evlist__propagate_maps()
58 evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus); in __perf_evlist__propagate_maps()
61 static void perf_evlist__propagate_maps(struct perf_evlist *evlist) in perf_evlist__propagate_maps() argument
65 perf_evlist__for_each_evsel(evlist, evsel) in perf_evlist__propagate_maps()
66 __perf_evlist__propagate_maps(evlist, evsel); in perf_evlist__propagate_maps()
69 void perf_evlist__add(struct perf_evlist *evlist, in perf_evlist__add() argument
72 list_add_tail(&evsel->node, &evlist->entries); in perf_evlist__add()
73 evlist->nr_entries += 1; in perf_evlist__add()
74 __perf_evlist__propagate_maps(evlist, evsel); in perf_evlist__add()
77 void perf_evlist__remove(struct perf_evlist *evlist, in perf_evlist__remove() argument
81 evlist->nr_entries -= 1; in perf_evlist__remove()
86 struct perf_evlist *evlist = zalloc(sizeof(*evlist)); in perf_evlist__new() local
88 if (evlist != NULL) in perf_evlist__new()
89 perf_evlist__init(evlist); in perf_evlist__new()
91 return evlist; in perf_evlist__new()
95 perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev) in perf_evlist__next() argument
100 next = list_first_entry(&evlist->entries, in perf_evlist__next()
108 if (&next->node == &evlist->entries) in perf_evlist__next()
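perf_evlist__next() is the primitive behind the perf_evlist__for_each_evsel() iteration macro: passing NULL as the previous entry returns the first evsel, and the walk ends once it wraps back to the list head. A minimal open-coded sketch, assuming an already-populated evlist:

	struct perf_evsel *evsel = NULL;

	/* NULL as 'prev' returns the first entry; a NULL return means the
	 * iteration wrapped back around to the list head. */
	while ((evsel = perf_evlist__next(evlist, evsel)) != NULL) {
		/* ... inspect or configure this evsel ... */
	}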
114 static void perf_evlist__purge(struct perf_evlist *evlist) in perf_evlist__purge() argument
118 perf_evlist__for_each_entry_safe(evlist, n, pos) { in perf_evlist__purge()
123 evlist->nr_entries = 0; in perf_evlist__purge()
126 void perf_evlist__exit(struct perf_evlist *evlist) in perf_evlist__exit() argument
128 perf_cpu_map__put(evlist->cpus); in perf_evlist__exit()
129 perf_cpu_map__put(evlist->all_cpus); in perf_evlist__exit()
130 perf_thread_map__put(evlist->threads); in perf_evlist__exit()
131 evlist->cpus = NULL; in perf_evlist__exit()
132 evlist->all_cpus = NULL; in perf_evlist__exit()
133 evlist->threads = NULL; in perf_evlist__exit()
134 fdarray__exit(&evlist->pollfd); in perf_evlist__exit()
137 void perf_evlist__delete(struct perf_evlist *evlist) in perf_evlist__delete() argument
139 if (evlist == NULL) in perf_evlist__delete()
142 perf_evlist__munmap(evlist); in perf_evlist__delete()
143 perf_evlist__close(evlist); in perf_evlist__delete()
144 perf_evlist__purge(evlist); in perf_evlist__delete()
145 perf_evlist__exit(evlist); in perf_evlist__delete()
146 free(evlist); in perf_evlist__delete()
149 void perf_evlist__set_maps(struct perf_evlist *evlist, in perf_evlist__set_maps() argument
160 if (cpus != evlist->cpus) { in perf_evlist__set_maps()
161 perf_cpu_map__put(evlist->cpus); in perf_evlist__set_maps()
162 evlist->cpus = perf_cpu_map__get(cpus); in perf_evlist__set_maps()
165 if (threads != evlist->threads) { in perf_evlist__set_maps()
166 perf_thread_map__put(evlist->threads); in perf_evlist__set_maps()
167 evlist->threads = perf_thread_map__get(threads); in perf_evlist__set_maps()
170 if (!evlist->all_cpus && cpus) in perf_evlist__set_maps()
171 evlist->all_cpus = perf_cpu_map__get(cpus); in perf_evlist__set_maps()
173 perf_evlist__propagate_maps(evlist); in perf_evlist__set_maps()
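perf_evlist__set_maps() takes its own references on the maps it is handed (dropping any previously installed ones), seeds all_cpus if it is still unset, and then propagates the result to every evsel. A minimal sketch in the style of the libperf counting example, measuring the calling process (NULL for cpus leaves the CPU map unset; a perf_cpu_map would be passed instead for per-CPU or system-wide setups):

	struct perf_thread_map *threads = perf_thread_map__new_dummy();

	perf_thread_map__set_pid(threads, 0, 0);	/* pid 0 = calling process */

	perf_evlist__set_maps(evlist, NULL, threads);

	/* the evlist holds its own reference now */
	perf_thread_map__put(threads);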
176 int perf_evlist__open(struct perf_evlist *evlist) in perf_evlist__open() argument
181 perf_evlist__for_each_entry(evlist, evsel) { in perf_evlist__open()
190 perf_evlist__close(evlist); in perf_evlist__open()
194 void perf_evlist__close(struct perf_evlist *evlist) in perf_evlist__close() argument
198 perf_evlist__for_each_entry_reverse(evlist, evsel) in perf_evlist__close()
202 void perf_evlist__enable(struct perf_evlist *evlist) in perf_evlist__enable() argument
206 perf_evlist__for_each_entry(evlist, evsel) in perf_evlist__enable()
210 void perf_evlist__disable(struct perf_evlist *evlist) in perf_evlist__disable() argument
214 perf_evlist__for_each_entry(evlist, evsel) in perf_evlist__disable()
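Taken together, the functions above form the usual counting flow: create the evlist, add evsels, set the maps, open, enable, run the workload, disable, read, close, delete. A minimal self-contained sketch along the lines of the libperf documentation examples; error handling is trimmed, and the software clock event plus the busy loop are only placeholders for a real workload:

	#include <linux/perf_event.h>
	#include <perf/evlist.h>
	#include <perf/evsel.h>
	#include <perf/threadmap.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr attr = {
			.type        = PERF_TYPE_SOFTWARE,
			.config      = PERF_COUNT_SW_CPU_CLOCK,
			.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				       PERF_FORMAT_TOTAL_TIME_RUNNING,
			.disabled    = 1,
		};
		struct perf_thread_map *threads = perf_thread_map__new_dummy();
		struct perf_evlist *evlist = perf_evlist__new();
		struct perf_evsel *evsel = perf_evsel__new(&attr);
		struct perf_counts_values counts = { 0 };
		volatile int spin = 1000000;

		perf_thread_map__set_pid(threads, 0, 0);	/* calling process */

		perf_evlist__add(evlist, evsel);
		perf_evlist__set_maps(evlist, NULL, threads);

		if (perf_evlist__open(evlist))	/* closes anything already opened on failure */
			return 1;

		perf_evlist__enable(evlist);
		while (spin--) ;		/* placeholder workload */
		perf_evlist__disable(evlist);

		perf_evlist__for_each_evsel(evlist, evsel) {
			perf_evsel__read(evsel, 0, 0, &counts);
			printf("count %llu, enabled %llu, running %llu\n",
			       (unsigned long long)counts.val,
			       (unsigned long long)counts.ena,
			       (unsigned long long)counts.run);
		}

		perf_evlist__close(evlist);
		perf_evlist__delete(evlist);	/* also frees the evsels via purge */
		perf_thread_map__put(threads);
		return 0;
	}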
218 u64 perf_evlist__read_format(struct perf_evlist *evlist) in perf_evlist__read_format() argument
220 struct perf_evsel *first = perf_evlist__first(evlist); in perf_evlist__read_format()
227 static void perf_evlist__id_hash(struct perf_evlist *evlist, in perf_evlist__id_hash() argument
237 hlist_add_head(&sid->node, &evlist->heads[hash]); in perf_evlist__id_hash()
240 void perf_evlist__id_add(struct perf_evlist *evlist, in perf_evlist__id_add() argument
244 perf_evlist__id_hash(evlist, evsel, cpu, thread, id); in perf_evlist__id_add()
248 int perf_evlist__id_add_fd(struct perf_evlist *evlist, in perf_evlist__id_add_fd() argument
270 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) in perf_evlist__id_add_fd()
285 perf_evlist__id_add(evlist, evsel, cpu, thread, id); in perf_evlist__id_add_fd()
289 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) in perf_evlist__alloc_pollfd() argument
291 int nr_cpus = perf_cpu_map__nr(evlist->cpus); in perf_evlist__alloc_pollfd()
292 int nr_threads = perf_thread_map__nr(evlist->threads); in perf_evlist__alloc_pollfd()
296 perf_evlist__for_each_entry(evlist, evsel) { in perf_evlist__alloc_pollfd()
303 if (fdarray__available_entries(&evlist->pollfd) < nfds && in perf_evlist__alloc_pollfd()
304 fdarray__grow(&evlist->pollfd, nfds) < 0) in perf_evlist__alloc_pollfd()
310 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, in perf_evlist__add_pollfd() argument
313 int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags); in perf_evlist__add_pollfd()
316 evlist->pollfd.priv[pos].ptr = ptr; in perf_evlist__add_pollfd()
332 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask) in perf_evlist__filter_pollfd() argument
334 return fdarray__filter(&evlist->pollfd, revents_and_mask, in perf_evlist__filter_pollfd()
338 int perf_evlist__poll(struct perf_evlist *evlist, int timeout) in perf_evlist__poll() argument
340 return fdarray__poll(&evlist->pollfd, timeout); in perf_evlist__poll()
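perf_evlist__poll() just forwards to fdarray__poll() on the pollfd array that mmap_per_evsel() filled, so it only becomes useful once perf_evlist__mmap() has registered the per-ring-buffer file descriptors. A minimal wait-loop sketch; done is a hypothetical flag set elsewhere (for example by a signal handler):

	while (!done) {
		/* block for up to 100 ms waiting for data on any ring buffer */
		if (perf_evlist__poll(evlist, 100) < 0)
			break;
		/* ... drain the mmaps here, see the read loop further below ... */
	}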
343 static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite) in perf_evlist__alloc_mmap() argument
348 map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); in perf_evlist__alloc_mmap()
352 for (i = 0; i < evlist->nr_mmaps; i++) { in perf_evlist__alloc_mmap()
380 perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx) in perf_evlist__mmap_cb_get() argument
384 maps = overwrite ? evlist->mmap_ovw : evlist->mmap; in perf_evlist__mmap_cb_get()
387 maps = perf_evlist__alloc_mmap(evlist, overwrite); in perf_evlist__mmap_cb_get()
392 evlist->mmap_ovw = maps; in perf_evlist__mmap_cb_get()
394 evlist->mmap = maps; in perf_evlist__mmap_cb_get()
409 static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map, in perf_evlist__set_mmap_first() argument
413 evlist->mmap_ovw_first = map; in perf_evlist__set_mmap_first()
415 evlist->mmap_first = map; in perf_evlist__set_mmap_first()
419 mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, in mmap_per_evsel() argument
423 int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx); in mmap_per_evsel()
427 perf_evlist__for_each_entry(evlist, evsel) { in mmap_per_evsel()
439 map = ops->get(evlist, overwrite, idx); in mmap_per_evsel()
475 perf_evlist__set_mmap_first(evlist, map, overwrite); in mmap_per_evsel()
486 perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) { in mmap_per_evsel()
492 if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, in mmap_per_evsel()
503 mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, in mmap_per_thread() argument
507 int nr_threads = perf_thread_map__nr(evlist->threads); in mmap_per_thread()
514 ops->idx(evlist, mp, thread, false); in mmap_per_thread()
516 if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread, in mmap_per_thread()
524 perf_evlist__munmap(evlist); in mmap_per_thread()
529 mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, in mmap_per_cpu() argument
532 int nr_threads = perf_thread_map__nr(evlist->threads); in mmap_per_cpu()
533 int nr_cpus = perf_cpu_map__nr(evlist->cpus); in mmap_per_cpu()
541 ops->idx(evlist, mp, cpu, true); in mmap_per_cpu()
544 if (mmap_per_evsel(evlist, ops, cpu, mp, cpu, in mmap_per_cpu()
553 perf_evlist__munmap(evlist); in mmap_per_cpu()
557 static int perf_evlist__nr_mmaps(struct perf_evlist *evlist) in perf_evlist__nr_mmaps() argument
561 nr_mmaps = perf_cpu_map__nr(evlist->cpus); in perf_evlist__nr_mmaps()
562 if (perf_cpu_map__empty(evlist->cpus)) in perf_evlist__nr_mmaps()
563 nr_mmaps = perf_thread_map__nr(evlist->threads); in perf_evlist__nr_mmaps()
568 int perf_evlist__mmap_ops(struct perf_evlist *evlist, in perf_evlist__mmap_ops() argument
573 const struct perf_cpu_map *cpus = evlist->cpus; in perf_evlist__mmap_ops()
578 mp->mask = evlist->mmap_len - page_size - 1; in perf_evlist__mmap_ops()
580 evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist); in perf_evlist__mmap_ops()
582 perf_evlist__for_each_entry(evlist, evsel) { in perf_evlist__mmap_ops()
589 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) in perf_evlist__mmap_ops()
593 return mmap_per_thread(evlist, ops, mp); in perf_evlist__mmap_ops()
595 return mmap_per_cpu(evlist, ops, mp); in perf_evlist__mmap_ops()
598 int perf_evlist__mmap(struct perf_evlist *evlist, int pages) in perf_evlist__mmap() argument
606 evlist->mmap_len = (pages + 1) * page_size; in perf_evlist__mmap()
608 return perf_evlist__mmap_ops(evlist, &ops, &mp); in perf_evlist__mmap()
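perf_evlist__mmap() sizes each ring buffer as (pages + 1) pages: one header/control page plus the data pages, and the mask computed in perf_evlist__mmap_ops() (mmap_len - page_size - 1) only works as a ring mask when pages is a power of two, which is the usual perf_event constraint. A minimal sketch, assuming the evlist has already been opened:

	/* 4 data pages plus 1 header page per ring buffer */
	if (perf_evlist__mmap(evlist, 4) < 0) {
		perf_evlist__close(evlist);
		return -1;
	}

	/* ... consume events ... */

	perf_evlist__munmap(evlist);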
611 void perf_evlist__munmap(struct perf_evlist *evlist) in perf_evlist__munmap() argument
615 if (evlist->mmap) { in perf_evlist__munmap()
616 for (i = 0; i < evlist->nr_mmaps; i++) in perf_evlist__munmap()
617 perf_mmap__munmap(&evlist->mmap[i]); in perf_evlist__munmap()
620 if (evlist->mmap_ovw) { in perf_evlist__munmap()
621 for (i = 0; i < evlist->nr_mmaps; i++) in perf_evlist__munmap()
622 perf_mmap__munmap(&evlist->mmap_ovw[i]); in perf_evlist__munmap()
625 zfree(&evlist->mmap); in perf_evlist__munmap()
626 zfree(&evlist->mmap_ovw); in perf_evlist__munmap()
630 perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map, in perf_evlist__next_mmap() argument
636 return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first; in perf_evlist__next_mmap()
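perf_evlist__next_mmap() backs the perf_evlist__for_each_mmap() macro, walking either the regular or the overwrite mmap list starting from mmap_first or mmap_ovw_first. A minimal read-loop sketch in the style of the libperf sampling example, assuming <perf/mmap.h> and <perf/event.h> are included and only the event header is inspected:

	struct perf_mmap *map;
	union perf_event *event;

	perf_evlist__for_each_mmap(evlist, map, false) {
		if (perf_mmap__read_init(map) < 0)
			continue;	/* nothing new in this ring buffer */

		while ((event = perf_mmap__read_event(map)) != NULL) {
			fprintf(stderr, "event type %u\n", event->header.type);
			perf_mmap__consume(map);
		}

		perf_mmap__read_done(map);
	}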