xref: /OK3568_Linux_fs/kernel/tools/perf/builtin-kvm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include "builtin.h"
3*4882a593Smuzhiyun #include "perf.h"
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include "util/build-id.h"
6*4882a593Smuzhiyun #include "util/evsel.h"
7*4882a593Smuzhiyun #include "util/evlist.h"
8*4882a593Smuzhiyun #include "util/mmap.h"
9*4882a593Smuzhiyun #include "util/term.h"
10*4882a593Smuzhiyun #include "util/symbol.h"
11*4882a593Smuzhiyun #include "util/thread.h"
12*4882a593Smuzhiyun #include "util/header.h"
13*4882a593Smuzhiyun #include "util/session.h"
14*4882a593Smuzhiyun #include "util/intlist.h"
15*4882a593Smuzhiyun #include <subcmd/pager.h>
16*4882a593Smuzhiyun #include <subcmd/parse-options.h>
17*4882a593Smuzhiyun #include "util/trace-event.h"
18*4882a593Smuzhiyun #include "util/debug.h"
19*4882a593Smuzhiyun #include "util/tool.h"
20*4882a593Smuzhiyun #include "util/stat.h"
21*4882a593Smuzhiyun #include "util/synthetic-events.h"
22*4882a593Smuzhiyun #include "util/top.h"
23*4882a593Smuzhiyun #include "util/data.h"
24*4882a593Smuzhiyun #include "util/ordered-events.h"
25*4882a593Smuzhiyun #include "util/kvm-stat.h"
26*4882a593Smuzhiyun #include "ui/ui.h"
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #include <sys/prctl.h>
29*4882a593Smuzhiyun #ifdef HAVE_TIMERFD_SUPPORT
30*4882a593Smuzhiyun #include <sys/timerfd.h>
31*4882a593Smuzhiyun #endif
32*4882a593Smuzhiyun #include <sys/time.h>
33*4882a593Smuzhiyun #include <sys/types.h>
34*4882a593Smuzhiyun #include <sys/stat.h>
35*4882a593Smuzhiyun #include <fcntl.h>
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #include <linux/err.h>
38*4882a593Smuzhiyun #include <linux/kernel.h>
39*4882a593Smuzhiyun #include <linux/string.h>
40*4882a593Smuzhiyun #include <linux/time64.h>
41*4882a593Smuzhiyun #include <linux/zalloc.h>
42*4882a593Smuzhiyun #include <errno.h>
43*4882a593Smuzhiyun #include <inttypes.h>
44*4882a593Smuzhiyun #include <poll.h>
45*4882a593Smuzhiyun #include <termios.h>
46*4882a593Smuzhiyun #include <semaphore.h>
47*4882a593Smuzhiyun #include <signal.h>
48*4882a593Smuzhiyun #include <math.h>
49*4882a593Smuzhiyun #include <perf/mmap.h>
50*4882a593Smuzhiyun 
get_filename_for_perf_kvm(void)51*4882a593Smuzhiyun static const char *get_filename_for_perf_kvm(void)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun 	const char *filename;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	if (perf_host && !perf_guest)
56*4882a593Smuzhiyun 		filename = strdup("perf.data.host");
57*4882a593Smuzhiyun 	else if (!perf_host && perf_guest)
58*4882a593Smuzhiyun 		filename = strdup("perf.data.guest");
59*4882a593Smuzhiyun 	else
60*4882a593Smuzhiyun 		filename = strdup("perf.data.kvm");
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 	return filename;
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun #ifdef HAVE_KVM_STAT_SUPPORT
66*4882a593Smuzhiyun 
exit_event_get_key(struct evsel * evsel,struct perf_sample * sample,struct event_key * key)67*4882a593Smuzhiyun void exit_event_get_key(struct evsel *evsel,
68*4882a593Smuzhiyun 			struct perf_sample *sample,
69*4882a593Smuzhiyun 			struct event_key *key)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun 	key->info = 0;
72*4882a593Smuzhiyun 	key->key  = evsel__intval(evsel, sample, kvm_exit_reason);
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun 
kvm_exit_event(struct evsel * evsel)75*4882a593Smuzhiyun bool kvm_exit_event(struct evsel *evsel)
76*4882a593Smuzhiyun {
77*4882a593Smuzhiyun 	return !strcmp(evsel->name, kvm_exit_trace);
78*4882a593Smuzhiyun }
79*4882a593Smuzhiyun 
/*
 * A VM-exit tracepoint opens an interval: extract its key and report
 * that an interval has begun.  Non-exit events are not begin events.
 */
bool exit_event_begin(struct evsel *evsel,
		      struct perf_sample *sample, struct event_key *key)
{
	if (!kvm_exit_event(evsel))
		return false;

	exit_event_get_key(evsel, sample, key);
	return true;
}
90*4882a593Smuzhiyun 
kvm_entry_event(struct evsel * evsel)91*4882a593Smuzhiyun bool kvm_entry_event(struct evsel *evsel)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun 	return !strcmp(evsel->name, kvm_entry_trace);
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun 
/* The interval opened by a VM exit closes at the next VM entry. */
bool exit_event_end(struct evsel *evsel,
		    struct perf_sample *sample __maybe_unused,
		    struct event_key *key __maybe_unused)
{
	return kvm_entry_event(evsel);
}
102*4882a593Smuzhiyun 
/*
 * Translate a raw exit code into its symbolic name using the
 * NULL-terminated per-ISA table.  Falls back to "UNKNOWN" (with a
 * diagnostic) when the code is not listed.
 */
static const char *get_exit_reason(struct perf_kvm_stat *kvm,
				   struct exit_reasons_table *tbl,
				   u64 exit_code)
{
	struct exit_reasons_table *entry;

	for (entry = tbl; entry->reason != NULL; entry++) {
		if (entry->exit_code == exit_code)
			return entry->reason;
	}

	pr_err("unknown kvm exit code:%lld on %s\n",
		(unsigned long long)exit_code, kvm->exit_reasons_isa);
	return "UNKNOWN";
}
117*4882a593Smuzhiyun 
exit_event_decode_key(struct perf_kvm_stat * kvm,struct event_key * key,char * decode)118*4882a593Smuzhiyun void exit_event_decode_key(struct perf_kvm_stat *kvm,
119*4882a593Smuzhiyun 			   struct event_key *key,
120*4882a593Smuzhiyun 			   char *decode)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun 	const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
123*4882a593Smuzhiyun 						  key->key);
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	scnprintf(decode, decode_str_len, "%s", exit_reason);
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun 
register_kvm_events_ops(struct perf_kvm_stat * kvm)128*4882a593Smuzhiyun static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun 	struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops;
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) {
133*4882a593Smuzhiyun 		if (!strcmp(events_ops->name, kvm->report_event)) {
134*4882a593Smuzhiyun 			kvm->events_ops = events_ops->ops;
135*4882a593Smuzhiyun 			return true;
136*4882a593Smuzhiyun 		}
137*4882a593Smuzhiyun 	}
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	return false;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
/* Per-thread record of the event currently in flight on a vcpu. */
struct vcpu_event_record {
	int vcpu_id;			/* vcpu id read from the kvm_entry tracepoint */
	u64 start_time;			/* timestamp of the begin event; 0 = none pending */
	struct kvm_event *last_event;	/* event opened by the begin event, if keyed */
};
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 
init_kvm_event_record(struct perf_kvm_stat * kvm)149*4882a593Smuzhiyun static void init_kvm_event_record(struct perf_kvm_stat *kvm)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun 	unsigned int i;
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	for (i = 0; i < EVENTS_CACHE_SIZE; i++)
154*4882a593Smuzhiyun 		INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun #ifdef HAVE_TIMERFD_SUPPORT
clear_events_cache_stats(struct list_head * kvm_events_cache)158*4882a593Smuzhiyun static void clear_events_cache_stats(struct list_head *kvm_events_cache)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun 	struct list_head *head;
161*4882a593Smuzhiyun 	struct kvm_event *event;
162*4882a593Smuzhiyun 	unsigned int i;
163*4882a593Smuzhiyun 	int j;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
166*4882a593Smuzhiyun 		head = &kvm_events_cache[i];
167*4882a593Smuzhiyun 		list_for_each_entry(event, head, hash_entry) {
168*4882a593Smuzhiyun 			/* reset stats for event */
169*4882a593Smuzhiyun 			event->total.time = 0;
170*4882a593Smuzhiyun 			init_stats(&event->total.stats);
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 			for (j = 0; j < event->max_vcpu; ++j) {
173*4882a593Smuzhiyun 				event->vcpu[j].time = 0;
174*4882a593Smuzhiyun 				init_stats(&event->vcpu[j].stats);
175*4882a593Smuzhiyun 			}
176*4882a593Smuzhiyun 		}
177*4882a593Smuzhiyun 	}
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun #endif
180*4882a593Smuzhiyun 
/* EVENTS_CACHE_SIZE is a power of two, so masking is the modulo. */
static int kvm_events_hash_fn(u64 key)
{
	return (int)(key & (EVENTS_CACHE_SIZE - 1));
}
185*4882a593Smuzhiyun 
kvm_event_expand(struct kvm_event * event,int vcpu_id)186*4882a593Smuzhiyun static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun 	int old_max_vcpu = event->max_vcpu;
189*4882a593Smuzhiyun 	void *prev;
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 	if (vcpu_id < event->max_vcpu)
192*4882a593Smuzhiyun 		return true;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	while (event->max_vcpu <= vcpu_id)
195*4882a593Smuzhiyun 		event->max_vcpu += DEFAULT_VCPU_NUM;
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	prev = event->vcpu;
198*4882a593Smuzhiyun 	event->vcpu = realloc(event->vcpu,
199*4882a593Smuzhiyun 			      event->max_vcpu * sizeof(*event->vcpu));
200*4882a593Smuzhiyun 	if (!event->vcpu) {
201*4882a593Smuzhiyun 		free(prev);
202*4882a593Smuzhiyun 		pr_err("Not enough memory\n");
203*4882a593Smuzhiyun 		return false;
204*4882a593Smuzhiyun 	}
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	memset(event->vcpu + old_max_vcpu, 0,
207*4882a593Smuzhiyun 	       (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
208*4882a593Smuzhiyun 	return true;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun 
kvm_alloc_init_event(struct event_key * key)211*4882a593Smuzhiyun static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	struct kvm_event *event;
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	event = zalloc(sizeof(*event));
216*4882a593Smuzhiyun 	if (!event) {
217*4882a593Smuzhiyun 		pr_err("Not enough memory\n");
218*4882a593Smuzhiyun 		return NULL;
219*4882a593Smuzhiyun 	}
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	event->key = *key;
222*4882a593Smuzhiyun 	init_stats(&event->total.stats);
223*4882a593Smuzhiyun 	return event;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun 
find_create_kvm_event(struct perf_kvm_stat * kvm,struct event_key * key)226*4882a593Smuzhiyun static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
227*4882a593Smuzhiyun 					       struct event_key *key)
228*4882a593Smuzhiyun {
229*4882a593Smuzhiyun 	struct kvm_event *event;
230*4882a593Smuzhiyun 	struct list_head *head;
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	BUG_ON(key->key == INVALID_KEY);
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
235*4882a593Smuzhiyun 	list_for_each_entry(event, head, hash_entry) {
236*4882a593Smuzhiyun 		if (event->key.key == key->key && event->key.info == key->info)
237*4882a593Smuzhiyun 			return event;
238*4882a593Smuzhiyun 	}
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 	event = kvm_alloc_init_event(key);
241*4882a593Smuzhiyun 	if (!event)
242*4882a593Smuzhiyun 		return NULL;
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	list_add(&event->hash_entry, head);
245*4882a593Smuzhiyun 	return event;
246*4882a593Smuzhiyun }
247*4882a593Smuzhiyun 
/*
 * Open an interval on this vcpu: remember the (possibly keyless)
 * event and the begin timestamp for the matching end event.
 */
static bool handle_begin_event(struct perf_kvm_stat *kvm,
			       struct vcpu_event_record *vcpu_record,
			       struct event_key *key, u64 timestamp)
{
	struct kvm_event *event;

	/* A begin event without a key defers event creation to the end event. */
	event = (key->key == INVALID_KEY) ? NULL
					  : find_create_kvm_event(kvm, key);

	vcpu_record->last_event = event;
	vcpu_record->start_time = timestamp;
	return true;
}
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun static void
kvm_update_event_stats(struct kvm_event_stats * kvm_stats,u64 time_diff)263*4882a593Smuzhiyun kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun 	kvm_stats->time += time_diff;
266*4882a593Smuzhiyun 	update_stats(&kvm_stats->stats, time_diff);
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun 
kvm_event_rel_stddev(int vcpu_id,struct kvm_event * event)269*4882a593Smuzhiyun static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
270*4882a593Smuzhiyun {
271*4882a593Smuzhiyun 	struct kvm_event_stats *kvm_stats = &event->total;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	if (vcpu_id != -1)
274*4882a593Smuzhiyun 		kvm_stats = &event->vcpu[vcpu_id];
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
277*4882a593Smuzhiyun 				avg_stats(&kvm_stats->stats));
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun 
/*
 * Account a completed interval either into the aggregate stats
 * (vcpu_id == -1) or into the per-vcpu slot, growing the vcpu array
 * on demand.  Returns false only when that growth fails.
 */
static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
			     u64 time_diff)
{
	struct kvm_event_stats *stats;

	if (vcpu_id == -1) {
		stats = &event->total;
	} else {
		if (!kvm_event_expand(event, vcpu_id))
			return false;
		stats = &event->vcpu[vcpu_id];
	}

	kvm_update_event_stats(stats, time_diff);
	return true;
}
294*4882a593Smuzhiyun 
is_child_event(struct perf_kvm_stat * kvm,struct evsel * evsel,struct perf_sample * sample,struct event_key * key)295*4882a593Smuzhiyun static bool is_child_event(struct perf_kvm_stat *kvm,
296*4882a593Smuzhiyun 			   struct evsel *evsel,
297*4882a593Smuzhiyun 			   struct perf_sample *sample,
298*4882a593Smuzhiyun 			   struct event_key *key)
299*4882a593Smuzhiyun {
300*4882a593Smuzhiyun 	struct child_event_ops *child_ops;
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 	child_ops = kvm->events_ops->child_ops;
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	if (!child_ops)
305*4882a593Smuzhiyun 		return false;
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun 	for (; child_ops->name; child_ops++) {
308*4882a593Smuzhiyun 		if (!strcmp(evsel->name, child_ops->name)) {
309*4882a593Smuzhiyun 			child_ops->get_key(evsel, sample, key);
310*4882a593Smuzhiyun 			return true;
311*4882a593Smuzhiyun 		}
312*4882a593Smuzhiyun 	}
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	return false;
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun 
/*
 * A child event refines the pending interval: it may supply the key
 * that the begin event lacked.  The start timestamp is untouched.
 */
static bool handle_child_event(struct perf_kvm_stat *kvm,
			       struct vcpu_event_record *vcpu_record,
			       struct event_key *key,
			       struct perf_sample *sample __maybe_unused)
{
	struct kvm_event *event;

	event = (key->key == INVALID_KEY) ? NULL
					  : find_create_kvm_event(kvm, key);

	vcpu_record->last_event = event;

	return true;
}
331*4882a593Smuzhiyun 
skip_event(const char * event)332*4882a593Smuzhiyun static bool skip_event(const char *event)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun 	const char * const *skip_events;
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	for (skip_events = kvm_skip_events; *skip_events; skip_events++)
337*4882a593Smuzhiyun 		if (!strcmp(event, *skip_events))
338*4882a593Smuzhiyun 			return true;
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	return false;
341*4882a593Smuzhiyun }
342*4882a593Smuzhiyun 
/*
 * Close the interval opened on this vcpu: pair the end event with the
 * pending begin event, account the elapsed time, and optionally warn
 * about intervals longer than kvm->duration.
 *
 * Returns false only when event accounting fails (OOM growing the
 * per-vcpu array); "nothing to do" cases return true.
 */
static bool handle_end_event(struct perf_kvm_stat *kvm,
			     struct vcpu_event_record *vcpu_record,
			     struct event_key *key,
			     struct perf_sample *sample)
{
	struct kvm_event *event;
	u64 time_begin, time_diff;
	int vcpu;

	/* -1 means "aggregate over all vcpus" downstream. */
	if (kvm->trace_vcpu == -1)
		vcpu = -1;
	else
		vcpu = vcpu_record->vcpu_id;

	event = vcpu_record->last_event;
	time_begin = vcpu_record->start_time;

	/* The begin event is not caught. */
	if (!time_begin)
		return true;

	/*
	 * In some case, the 'begin event' only records the start timestamp,
	 * the actual event is recognized in the 'end event' (e.g. mmio-event).
	 */

	/* Both begin and end events did not get the key. */
	if (!event && key->key == INVALID_KEY)
		return true;

	if (!event)
		event = find_create_kvm_event(kvm, key);

	if (!event)
		return false;

	/* Consume the pending interval before any early return below. */
	vcpu_record->last_event = NULL;
	vcpu_record->start_time = 0;

	/* seems to happen once in a while during live mode */
	if (sample->time < time_begin) {
		pr_debug("End time before begin time; skipping event.\n");
		return true;
	}

	time_diff = sample->time - time_begin;

	/* Report (but still account) events exceeding the duration filter. */
	if (kvm->duration && time_diff > kvm->duration) {
		char decode[decode_str_len];

		kvm->events_ops->decode_key(kvm, &event->key, decode);
		if (!skip_event(decode)) {
			pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
				 sample->time, sample->pid, vcpu_record->vcpu_id,
				 decode, time_diff / NSEC_PER_USEC);
		}
	}

	return update_kvm_event(event, vcpu, time_diff);
}
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun static
per_vcpu_record(struct thread * thread,struct evsel * evsel,struct perf_sample * sample)405*4882a593Smuzhiyun struct vcpu_event_record *per_vcpu_record(struct thread *thread,
406*4882a593Smuzhiyun 					  struct evsel *evsel,
407*4882a593Smuzhiyun 					  struct perf_sample *sample)
408*4882a593Smuzhiyun {
409*4882a593Smuzhiyun 	/* Only kvm_entry records vcpu id. */
410*4882a593Smuzhiyun 	if (!thread__priv(thread) && kvm_entry_event(evsel)) {
411*4882a593Smuzhiyun 		struct vcpu_event_record *vcpu_record;
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 		vcpu_record = zalloc(sizeof(*vcpu_record));
414*4882a593Smuzhiyun 		if (!vcpu_record) {
415*4882a593Smuzhiyun 			pr_err("%s: Not enough memory\n", __func__);
416*4882a593Smuzhiyun 			return NULL;
417*4882a593Smuzhiyun 		}
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 		vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str);
420*4882a593Smuzhiyun 		thread__set_priv(thread, vcpu_record);
421*4882a593Smuzhiyun 	}
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun 	return thread__priv(thread);
424*4882a593Smuzhiyun }
425*4882a593Smuzhiyun 
handle_kvm_event(struct perf_kvm_stat * kvm,struct thread * thread,struct evsel * evsel,struct perf_sample * sample)426*4882a593Smuzhiyun static bool handle_kvm_event(struct perf_kvm_stat *kvm,
427*4882a593Smuzhiyun 			     struct thread *thread,
428*4882a593Smuzhiyun 			     struct evsel *evsel,
429*4882a593Smuzhiyun 			     struct perf_sample *sample)
430*4882a593Smuzhiyun {
431*4882a593Smuzhiyun 	struct vcpu_event_record *vcpu_record;
432*4882a593Smuzhiyun 	struct event_key key = { .key = INVALID_KEY,
433*4882a593Smuzhiyun 				 .exit_reasons = kvm->exit_reasons };
434*4882a593Smuzhiyun 
435*4882a593Smuzhiyun 	vcpu_record = per_vcpu_record(thread, evsel, sample);
436*4882a593Smuzhiyun 	if (!vcpu_record)
437*4882a593Smuzhiyun 		return true;
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	/* only process events for vcpus user cares about */
440*4882a593Smuzhiyun 	if ((kvm->trace_vcpu != -1) &&
441*4882a593Smuzhiyun 	    (kvm->trace_vcpu != vcpu_record->vcpu_id))
442*4882a593Smuzhiyun 		return true;
443*4882a593Smuzhiyun 
444*4882a593Smuzhiyun 	if (kvm->events_ops->is_begin_event(evsel, sample, &key))
445*4882a593Smuzhiyun 		return handle_begin_event(kvm, vcpu_record, &key, sample->time);
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun 	if (is_child_event(kvm, evsel, sample, &key))
448*4882a593Smuzhiyun 		return handle_child_event(kvm, vcpu_record, &key, sample);
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	if (kvm->events_ops->is_end_event(evsel, sample, &key))
451*4882a593Smuzhiyun 		return handle_end_event(kvm, vcpu_record, &key, sample);
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	return true;
454*4882a593Smuzhiyun }
455*4882a593Smuzhiyun 
/*
 * Generate get_event_<func>(): read <field> from the aggregate stats
 * when vcpu == -1, from one vcpu's slot otherwise.  A vcpu beyond the
 * currently expanded array reads as 0.
 */
#define GET_EVENT_KEY(func, field)					\
static u64 get_event_ ##func(struct kvm_event *event, int vcpu)		\
{									\
	if (vcpu == -1)							\
		return event->total.field;				\
									\
	if (vcpu >= event->max_vcpu)					\
		return 0;						\
									\
	return event->vcpu[vcpu].field;					\
}
467*4882a593Smuzhiyun 
/*
 * Generate both the accessor and a descending-order comparator
 * compare_kvm_event_<func>() for use as a sort key.
 */
#define COMPARE_EVENT_KEY(func, field)					\
GET_EVENT_KEY(func, field)						\
static int compare_kvm_event_ ## func(struct kvm_event *one,		\
					struct kvm_event *two, int vcpu)\
{									\
	return get_event_ ##func(one, vcpu) >				\
				get_event_ ##func(two, vcpu);		\
}

/* Accessors (and comparators where sortable) for each stats field. */
GET_EVENT_KEY(time, time);
COMPARE_EVENT_KEY(count, stats.n);
COMPARE_EVENT_KEY(mean, stats.mean);
GET_EVENT_KEY(max, stats.max);
GET_EVENT_KEY(min, stats.min);
482*4882a593Smuzhiyun 
/* Map a user-visible sort-key name to its comparator. */
#define DEF_SORT_NAME_KEY(name, compare_key)				\
	{ #name, compare_kvm_event_ ## compare_key }

/* Supported --key values; NULL entry terminates the table. */
static struct kvm_event_key keys[] = {
	DEF_SORT_NAME_KEY(sample, count),
	DEF_SORT_NAME_KEY(time, mean),
	{ NULL, NULL }
};
491*4882a593Smuzhiyun 
select_key(struct perf_kvm_stat * kvm)492*4882a593Smuzhiyun static bool select_key(struct perf_kvm_stat *kvm)
493*4882a593Smuzhiyun {
494*4882a593Smuzhiyun 	int i;
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun 	for (i = 0; keys[i].name; i++) {
497*4882a593Smuzhiyun 		if (!strcmp(keys[i].name, kvm->sort_key)) {
498*4882a593Smuzhiyun 			kvm->compare = keys[i].key;
499*4882a593Smuzhiyun 			return true;
500*4882a593Smuzhiyun 		}
501*4882a593Smuzhiyun 	}
502*4882a593Smuzhiyun 
503*4882a593Smuzhiyun 	pr_err("Unknown compare key:%s\n", kvm->sort_key);
504*4882a593Smuzhiyun 	return false;
505*4882a593Smuzhiyun }
506*4882a593Smuzhiyun 
/*
 * Insert event into the result rb-tree, ordered by bigger() so that
 * the left-most node is the largest (pop_from_result pops rb_first).
 */
static void insert_to_result(struct rb_root *result, struct kvm_event *event,
			     key_cmp_fun bigger, int vcpu)
{
	struct rb_node **link = &result->rb_node;
	struct rb_node *parent = NULL;

	while (*link) {
		struct kvm_event *cur;

		cur = container_of(*link, struct kvm_event, rb);
		parent = *link;

		if (bigger(event, cur, vcpu))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&event->rb, parent, link);
	rb_insert_color(&event->rb, result);
}
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun static void
update_total_count(struct perf_kvm_stat * kvm,struct kvm_event * event)529*4882a593Smuzhiyun update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event)
530*4882a593Smuzhiyun {
531*4882a593Smuzhiyun 	int vcpu = kvm->trace_vcpu;
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	kvm->total_count += get_event_count(event, vcpu);
534*4882a593Smuzhiyun 	kvm->total_time += get_event_time(event, vcpu);
535*4882a593Smuzhiyun }
536*4882a593Smuzhiyun 
/* An event matters only if it was seen at least once for this vcpu. */
static bool event_is_valid(struct kvm_event *event, int vcpu)
{
	return get_event_count(event, vcpu) != 0;
}
541*4882a593Smuzhiyun 
sort_result(struct perf_kvm_stat * kvm)542*4882a593Smuzhiyun static void sort_result(struct perf_kvm_stat *kvm)
543*4882a593Smuzhiyun {
544*4882a593Smuzhiyun 	unsigned int i;
545*4882a593Smuzhiyun 	int vcpu = kvm->trace_vcpu;
546*4882a593Smuzhiyun 	struct kvm_event *event;
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun 	for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
549*4882a593Smuzhiyun 		list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) {
550*4882a593Smuzhiyun 			if (event_is_valid(event, vcpu)) {
551*4882a593Smuzhiyun 				update_total_count(kvm, event);
552*4882a593Smuzhiyun 				insert_to_result(&kvm->result, event,
553*4882a593Smuzhiyun 						 kvm->compare, vcpu);
554*4882a593Smuzhiyun 			}
555*4882a593Smuzhiyun 		}
556*4882a593Smuzhiyun 	}
557*4882a593Smuzhiyun }
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun /* returns left most element of result, and erase it */
pop_from_result(struct rb_root * result)560*4882a593Smuzhiyun static struct kvm_event *pop_from_result(struct rb_root *result)
561*4882a593Smuzhiyun {
562*4882a593Smuzhiyun 	struct rb_node *node = rb_first(result);
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 	if (!node)
565*4882a593Smuzhiyun 		return NULL;
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	rb_erase(node, result);
568*4882a593Smuzhiyun 	return container_of(node, struct kvm_event, rb);
569*4882a593Smuzhiyun }
570*4882a593Smuzhiyun 
print_vcpu_info(struct perf_kvm_stat * kvm)571*4882a593Smuzhiyun static void print_vcpu_info(struct perf_kvm_stat *kvm)
572*4882a593Smuzhiyun {
573*4882a593Smuzhiyun 	int vcpu = kvm->trace_vcpu;
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	pr_info("Analyze events for ");
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 	if (kvm->opts.target.system_wide)
578*4882a593Smuzhiyun 		pr_info("all VMs, ");
579*4882a593Smuzhiyun 	else if (kvm->opts.target.pid)
580*4882a593Smuzhiyun 		pr_info("pid(s) %s, ", kvm->opts.target.pid);
581*4882a593Smuzhiyun 	else
582*4882a593Smuzhiyun 		pr_info("dazed and confused on what is monitored, ");
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	if (vcpu == -1)
585*4882a593Smuzhiyun 		pr_info("all VCPUs:\n\n");
586*4882a593Smuzhiyun 	else
587*4882a593Smuzhiyun 		pr_info("VCPU %d:\n\n", vcpu);
588*4882a593Smuzhiyun }
589*4882a593Smuzhiyun 
/*
 * Print the current wall-clock time as HH:MM:SS.usec (live mode
 * refresh banner).  Falls back to a zero timestamp if localtime_r()
 * fails.
 *
 * Cleanups: dropped the redundant trailing `return;` and braced the
 * else branch to match its braced if.
 */
static void show_timeofday(void)
{
	char date[64];
	struct timeval tv;
	struct tm ltime;

	gettimeofday(&tv, NULL);
	if (localtime_r(&tv.tv_sec, &ltime)) {
		strftime(date, sizeof(date), "%H:%M:%S", &ltime);
		pr_info("%s.%06ld", date, tv.tv_usec);
	} else {
		pr_info("00:00:00.000000");
	}
}
605*4882a593Smuzhiyun 
/*
 * Print the sorted per-event statistics table: header, one row per
 * event (samples, sample%, time%, min/max/avg with relative stddev),
 * then totals and any lost-event count.  In live mode the screen is
 * cleared and a timestamp shown first.
 */
static void print_result(struct perf_kvm_stat *kvm)
{
	char decode[decode_str_len];
	struct kvm_event *event;
	int vcpu = kvm->trace_vcpu;

	if (kvm->live) {
		puts(CONSOLE_CLEAR);
		show_timeofday();
	}

	pr_info("\n\n");
	print_vcpu_info(kvm);
	pr_info("%*s ", decode_str_len, kvm->events_ops->name);
	pr_info("%10s ", "Samples");
	pr_info("%9s ", "Samples%");

	pr_info("%9s ", "Time%");
	pr_info("%11s ", "Min Time");
	pr_info("%11s ", "Max Time");
	pr_info("%16s ", "Avg time");
	pr_info("\n\n");

	/* pop_from_result() drains the tree in sort order. */
	while ((event = pop_from_result(&kvm->result))) {
		u64 ecount, etime, max, min;

		ecount = get_event_count(event, vcpu);
		etime = get_event_time(event, vcpu);
		max = get_event_max(event, vcpu);
		min = get_event_min(event, vcpu);

		kvm->events_ops->decode_key(kvm, &event->key, decode);
		pr_info("%*s ", decode_str_len, decode);
		pr_info("%10llu ", (unsigned long long)ecount);
		pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
		pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
		pr_info("%9.2fus ", (double)min / NSEC_PER_USEC);
		pr_info("%9.2fus ", (double)max / NSEC_PER_USEC);
		/* ecount > 0 here: sort_result() only inserts valid events. */
		pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount / NSEC_PER_USEC,
			kvm_event_rel_stddev(vcpu, event));
		pr_info("\n");
	}

	pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
		kvm->total_count, kvm->total_time / (double)NSEC_PER_USEC);

	if (kvm->lost_events)
		pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
}
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun #ifdef HAVE_TIMERFD_SUPPORT
/*
 * Tool callback for PERF_RECORD_LOST: just count the loss so the
 * live display can report how many events the kernel dropped.
 */
static int process_lost_event(struct perf_tool *tool,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct perf_kvm_stat *stat = container_of(tool, struct perf_kvm_stat, tool);

	stat->lost_events++;
	return 0;
}
667*4882a593Smuzhiyun #endif
668*4882a593Smuzhiyun 
skip_sample(struct perf_kvm_stat * kvm,struct perf_sample * sample)669*4882a593Smuzhiyun static bool skip_sample(struct perf_kvm_stat *kvm,
670*4882a593Smuzhiyun 			struct perf_sample *sample)
671*4882a593Smuzhiyun {
672*4882a593Smuzhiyun 	if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL)
673*4882a593Smuzhiyun 		return true;
674*4882a593Smuzhiyun 
675*4882a593Smuzhiyun 	return false;
676*4882a593Smuzhiyun }
677*4882a593Smuzhiyun 
process_sample_event(struct perf_tool * tool,union perf_event * event,struct perf_sample * sample,struct evsel * evsel,struct machine * machine)678*4882a593Smuzhiyun static int process_sample_event(struct perf_tool *tool,
679*4882a593Smuzhiyun 				union perf_event *event,
680*4882a593Smuzhiyun 				struct perf_sample *sample,
681*4882a593Smuzhiyun 				struct evsel *evsel,
682*4882a593Smuzhiyun 				struct machine *machine)
683*4882a593Smuzhiyun {
684*4882a593Smuzhiyun 	int err = 0;
685*4882a593Smuzhiyun 	struct thread *thread;
686*4882a593Smuzhiyun 	struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
687*4882a593Smuzhiyun 						 tool);
688*4882a593Smuzhiyun 
689*4882a593Smuzhiyun 	if (skip_sample(kvm, sample))
690*4882a593Smuzhiyun 		return 0;
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
693*4882a593Smuzhiyun 	if (thread == NULL) {
694*4882a593Smuzhiyun 		pr_debug("problem processing %d event, skipping it.\n",
695*4882a593Smuzhiyun 			event->header.type);
696*4882a593Smuzhiyun 		return -1;
697*4882a593Smuzhiyun 	}
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 	if (!handle_kvm_event(kvm, thread, evsel, sample))
700*4882a593Smuzhiyun 		err = -1;
701*4882a593Smuzhiyun 
702*4882a593Smuzhiyun 	thread__put(thread);
703*4882a593Smuzhiyun 	return err;
704*4882a593Smuzhiyun }
705*4882a593Smuzhiyun 
cpu_isa_config(struct perf_kvm_stat * kvm)706*4882a593Smuzhiyun static int cpu_isa_config(struct perf_kvm_stat *kvm)
707*4882a593Smuzhiyun {
708*4882a593Smuzhiyun 	char buf[128], *cpuid;
709*4882a593Smuzhiyun 	int err;
710*4882a593Smuzhiyun 
711*4882a593Smuzhiyun 	if (kvm->live) {
712*4882a593Smuzhiyun 		err = get_cpuid(buf, sizeof(buf));
713*4882a593Smuzhiyun 		if (err != 0) {
714*4882a593Smuzhiyun 			pr_err("Failed to look up CPU type: %s\n",
715*4882a593Smuzhiyun 			       str_error_r(err, buf, sizeof(buf)));
716*4882a593Smuzhiyun 			return -err;
717*4882a593Smuzhiyun 		}
718*4882a593Smuzhiyun 		cpuid = buf;
719*4882a593Smuzhiyun 	} else
720*4882a593Smuzhiyun 		cpuid = kvm->session->header.env.cpuid;
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	if (!cpuid) {
723*4882a593Smuzhiyun 		pr_err("Failed to look up CPU type\n");
724*4882a593Smuzhiyun 		return -EINVAL;
725*4882a593Smuzhiyun 	}
726*4882a593Smuzhiyun 
727*4882a593Smuzhiyun 	err = cpu_isa_init(kvm, cpuid);
728*4882a593Smuzhiyun 	if (err == -ENOTSUP)
729*4882a593Smuzhiyun 		pr_err("CPU %s is not supported.\n", cpuid);
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	return err;
732*4882a593Smuzhiyun }
733*4882a593Smuzhiyun 
/*
 * A vcpu id of -1 means "all vcpus"; any other value must be >= 0.
 * (vcpu != -1 && vcpu < 0 is exactly vcpu < -1.)
 */
static bool verify_vcpu(int vcpu)
{
	if (vcpu < -1) {
		pr_err("Invalid vcpu:%d.\n", vcpu);
		return false;
	}

	return true;
}
743*4882a593Smuzhiyun 
744*4882a593Smuzhiyun #ifdef HAVE_TIMERFD_SUPPORT
745*4882a593Smuzhiyun /* keeping the max events to a modest level to keep
746*4882a593Smuzhiyun  * the processing of samples per mmap smooth.
747*4882a593Smuzhiyun  */
748*4882a593Smuzhiyun #define PERF_KVM__MAX_EVENTS_PER_MMAP  25
749*4882a593Smuzhiyun 
/*
 * Drain events from ring buffer 'idx' into the session's ordered
 * event queue, stopping after PERF_KVM__MAX_EVENTS_PER_MMAP events so
 * that one busy mmap cannot starve the others in a round.
 *
 * *mmap_time is set to the timestamp of the first sample read from
 * this mmap (ULLONG_MAX if nothing was read); the caller takes the
 * minimum across all mmaps as a safe flush point.
 *
 * Returns the number of events queued (>= 0) or -1 on error.
 */
static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
				   u64 *mmap_time)
{
	struct evlist *evlist = kvm->evlist;
	union perf_event *event;
	struct mmap *md;
	u64 timestamp;
	s64 n = 0;
	int err;

	*mmap_time = ULLONG_MAX;
	md = &evlist->mmap[idx];
	err = perf_mmap__read_init(&md->core);
	if (err < 0)
		return (err == -EAGAIN) ? 0 : -1; /* -EAGAIN: ring buffer empty */

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (err) {
			perf_mmap__consume(&md->core);
			pr_err("Failed to parse sample\n");
			return -1;
		}

		err = perf_session__queue_event(kvm->session, event, timestamp, 0);
		/*
		 * FIXME: Here we can't consume the event, as perf_session__queue_event will
		 *        point to it, and it'll get possibly overwritten by the kernel.
		 */
		perf_mmap__consume(&md->core);

		if (err) {
			pr_err("Failed to enqueue sample: %d\n", err);
			return -1;
		}

		/* save time stamp of our first sample for this mmap */
		if (n == 0)
			*mmap_time = timestamp;

		/* limit events per mmap handled all at once */
		n++;
		if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
			break;
	}

	perf_mmap__read_done(&md->core);
	return n;
}
799*4882a593Smuzhiyun 
perf_kvm__mmap_read(struct perf_kvm_stat * kvm)800*4882a593Smuzhiyun static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
801*4882a593Smuzhiyun {
802*4882a593Smuzhiyun 	int i, err, throttled = 0;
803*4882a593Smuzhiyun 	s64 n, ntotal = 0;
804*4882a593Smuzhiyun 	u64 flush_time = ULLONG_MAX, mmap_time;
805*4882a593Smuzhiyun 
806*4882a593Smuzhiyun 	for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) {
807*4882a593Smuzhiyun 		n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
808*4882a593Smuzhiyun 		if (n < 0)
809*4882a593Smuzhiyun 			return -1;
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun 		/* flush time is going to be the minimum of all the individual
812*4882a593Smuzhiyun 		 * mmap times. Essentially, we flush all the samples queued up
813*4882a593Smuzhiyun 		 * from the last pass under our minimal start time -- that leaves
814*4882a593Smuzhiyun 		 * a very small race for samples to come in with a lower timestamp.
815*4882a593Smuzhiyun 		 * The ioctl to return the perf_clock timestamp should close the
816*4882a593Smuzhiyun 		 * race entirely.
817*4882a593Smuzhiyun 		 */
818*4882a593Smuzhiyun 		if (mmap_time < flush_time)
819*4882a593Smuzhiyun 			flush_time = mmap_time;
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun 		ntotal += n;
822*4882a593Smuzhiyun 		if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
823*4882a593Smuzhiyun 			throttled = 1;
824*4882a593Smuzhiyun 	}
825*4882a593Smuzhiyun 
826*4882a593Smuzhiyun 	/* flush queue after each round in which we processed events */
827*4882a593Smuzhiyun 	if (ntotal) {
828*4882a593Smuzhiyun 		struct ordered_events *oe = &kvm->session->ordered_events;
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun 		oe->next_flush = flush_time;
831*4882a593Smuzhiyun 		err = ordered_events__flush(oe, OE_FLUSH__ROUND);
832*4882a593Smuzhiyun 		if (err) {
833*4882a593Smuzhiyun 			if (kvm->lost_events)
834*4882a593Smuzhiyun 				pr_info("\nLost events: %" PRIu64 "\n\n",
835*4882a593Smuzhiyun 					kvm->lost_events);
836*4882a593Smuzhiyun 			return err;
837*4882a593Smuzhiyun 		}
838*4882a593Smuzhiyun 	}
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	return throttled;
841*4882a593Smuzhiyun }
842*4882a593Smuzhiyun 
843*4882a593Smuzhiyun static volatile int done;
844*4882a593Smuzhiyun 
/* SIGINT/SIGTERM handler: ask the live event loop to terminate. */
static void sig_handler(int sig __maybe_unused)
{
	done = 1;
}
849*4882a593Smuzhiyun 
perf_kvm__timerfd_create(struct perf_kvm_stat * kvm)850*4882a593Smuzhiyun static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm)
851*4882a593Smuzhiyun {
852*4882a593Smuzhiyun 	struct itimerspec new_value;
853*4882a593Smuzhiyun 	int rc = -1;
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun 	kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
856*4882a593Smuzhiyun 	if (kvm->timerfd < 0) {
857*4882a593Smuzhiyun 		pr_err("timerfd_create failed\n");
858*4882a593Smuzhiyun 		goto out;
859*4882a593Smuzhiyun 	}
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	new_value.it_value.tv_sec = kvm->display_time;
862*4882a593Smuzhiyun 	new_value.it_value.tv_nsec = 0;
863*4882a593Smuzhiyun 	new_value.it_interval.tv_sec = kvm->display_time;
864*4882a593Smuzhiyun 	new_value.it_interval.tv_nsec = 0;
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun 	if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) {
867*4882a593Smuzhiyun 		pr_err("timerfd_settime failed: %d\n", errno);
868*4882a593Smuzhiyun 		close(kvm->timerfd);
869*4882a593Smuzhiyun 		goto out;
870*4882a593Smuzhiyun 	}
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun 	rc = 0;
873*4882a593Smuzhiyun out:
874*4882a593Smuzhiyun 	return rc;
875*4882a593Smuzhiyun }
876*4882a593Smuzhiyun 
perf_kvm__handle_timerfd(struct perf_kvm_stat * kvm)877*4882a593Smuzhiyun static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
878*4882a593Smuzhiyun {
879*4882a593Smuzhiyun 	uint64_t c;
880*4882a593Smuzhiyun 	int rc;
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun 	rc = read(kvm->timerfd, &c, sizeof(uint64_t));
883*4882a593Smuzhiyun 	if (rc < 0) {
884*4882a593Smuzhiyun 		if (errno == EAGAIN)
885*4882a593Smuzhiyun 			return 0;
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun 		pr_err("Failed to read timer fd: %d\n", errno);
888*4882a593Smuzhiyun 		return -1;
889*4882a593Smuzhiyun 	}
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 	if (rc != sizeof(uint64_t)) {
892*4882a593Smuzhiyun 		pr_err("Error reading timer fd - invalid size returned\n");
893*4882a593Smuzhiyun 		return -1;
894*4882a593Smuzhiyun 	}
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 	if (c != 1)
897*4882a593Smuzhiyun 		pr_debug("Missed timer beats: %" PRIu64 "\n", c-1);
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun 	/* update display */
900*4882a593Smuzhiyun 	sort_result(kvm);
901*4882a593Smuzhiyun 	print_result(kvm);
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun 	/* reset counts */
904*4882a593Smuzhiyun 	clear_events_cache_stats(kvm->kvm_events_cache);
905*4882a593Smuzhiyun 	kvm->total_count = 0;
906*4882a593Smuzhiyun 	kvm->total_time = 0;
907*4882a593Smuzhiyun 	kvm->lost_events = 0;
908*4882a593Smuzhiyun 
909*4882a593Smuzhiyun 	return 0;
910*4882a593Smuzhiyun }
911*4882a593Smuzhiyun 
/* Add O_NONBLOCK to fd's file status flags. Returns 0 on success, -1 on error. */
static int fd_set_nonblock(int fd)
{
	long flags = fcntl(fd, F_GETFL);

	if (flags < 0) {
		pr_err("Failed to get current flags for fd %d\n", fd);
		return -1;
	}

	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
		pr_err("Failed to set non-block option on fd %d\n", fd);
		return -1;
	}

	return 0;
}
929*4882a593Smuzhiyun 
/* Returns 1 when the user pressed 'q' (quit the live display), 0 otherwise. */
static int perf_kvm__handle_stdin(void)
{
	return getc(stdin) == 'q' ? 1 : 0;
}
940*4882a593Smuzhiyun 
/*
 * Main loop of "perf kvm stat live": poll the event ring buffers, the
 * display timerfd and stdin, refreshing the statistics on every timer
 * beat until SIGINT/SIGTERM or a 'q' keypress.
 *
 * NOTE(review): early 'goto out' paths (e.g. verify_vcpu failure) run
 * tcsetattr() on 'save' before set_term_quiet_input() has filled it,
 * and test kvm->timerfd before perf_kvm__timerfd_create() assigned it
 * — presumably harmless only if the caller zeroed the struct; confirm.
 */
static int kvm_events_live_report(struct perf_kvm_stat *kvm)
{
	int nr_stdin, ret, err = -EINVAL;
	struct termios save;

	/* live flag must be set first */
	kvm->live = true;

	ret = cpu_isa_config(kvm);
	if (ret < 0)
		return ret;

	if (!verify_vcpu(kvm->trace_vcpu) ||
	    !select_key(kvm) ||
	    !register_kvm_events_ops(kvm)) {
		goto out;
	}

	/* disable echo/canonical input so a bare 'q' is seen immediately */
	set_term_quiet_input(&save);
	init_kvm_event_record(kvm);

	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	/* add timer fd */
	if (perf_kvm__timerfd_create(kvm) < 0) {
		err = -1;
		goto out;
	}

	if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0)
		goto out;

	/* remember stdin's slot so its revents can be checked below */
	nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin));
	if (nr_stdin < 0)
		goto out;

	if (fd_set_nonblock(fileno(stdin)) != 0)
		goto out;

	/* everything is good - enable the events and process */
	evlist__enable(kvm->evlist);

	while (!done) {
		struct fdarray *fda = &kvm->evlist->core.pollfd;
		int rc;

		rc = perf_kvm__mmap_read(kvm);
		if (rc < 0)
			break;

		err = perf_kvm__handle_timerfd(kvm);
		if (err)
			goto out;

		if (fda->entries[nr_stdin].revents & POLLIN)
			done = perf_kvm__handle_stdin();

		/* only sleep when the last read pass was not throttled */
		if (!rc && !done)
			err = evlist__poll(kvm->evlist, 100);
	}

	evlist__disable(kvm->evlist);

	/* final report of whatever accumulated since the last timer beat */
	if (err == 0) {
		sort_result(kvm);
		print_result(kvm);
	}

out:
	if (kvm->timerfd >= 0)
		close(kvm->timerfd);

	/* restore the terminal modes changed by set_term_quiet_input() */
	tcsetattr(0, TCSAFLUSH, &save);
	return err;
}
1017*4882a593Smuzhiyun 
kvm_live_open_events(struct perf_kvm_stat * kvm)1018*4882a593Smuzhiyun static int kvm_live_open_events(struct perf_kvm_stat *kvm)
1019*4882a593Smuzhiyun {
1020*4882a593Smuzhiyun 	int err, rc = -1;
1021*4882a593Smuzhiyun 	struct evsel *pos;
1022*4882a593Smuzhiyun 	struct evlist *evlist = kvm->evlist;
1023*4882a593Smuzhiyun 	char sbuf[STRERR_BUFSIZE];
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun 	perf_evlist__config(evlist, &kvm->opts, NULL);
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	/*
1028*4882a593Smuzhiyun 	 * Note: exclude_{guest,host} do not apply here.
1029*4882a593Smuzhiyun 	 *       This command processes KVM tracepoints from host only
1030*4882a593Smuzhiyun 	 */
1031*4882a593Smuzhiyun 	evlist__for_each_entry(evlist, pos) {
1032*4882a593Smuzhiyun 		struct perf_event_attr *attr = &pos->core.attr;
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun 		/* make sure these *are* set */
1035*4882a593Smuzhiyun 		evsel__set_sample_bit(pos, TID);
1036*4882a593Smuzhiyun 		evsel__set_sample_bit(pos, TIME);
1037*4882a593Smuzhiyun 		evsel__set_sample_bit(pos, CPU);
1038*4882a593Smuzhiyun 		evsel__set_sample_bit(pos, RAW);
1039*4882a593Smuzhiyun 		/* make sure these are *not*; want as small a sample as possible */
1040*4882a593Smuzhiyun 		evsel__reset_sample_bit(pos, PERIOD);
1041*4882a593Smuzhiyun 		evsel__reset_sample_bit(pos, IP);
1042*4882a593Smuzhiyun 		evsel__reset_sample_bit(pos, CALLCHAIN);
1043*4882a593Smuzhiyun 		evsel__reset_sample_bit(pos, ADDR);
1044*4882a593Smuzhiyun 		evsel__reset_sample_bit(pos, READ);
1045*4882a593Smuzhiyun 		attr->mmap = 0;
1046*4882a593Smuzhiyun 		attr->comm = 0;
1047*4882a593Smuzhiyun 		attr->task = 0;
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 		attr->sample_period = 1;
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun 		attr->watermark = 0;
1052*4882a593Smuzhiyun 		attr->wakeup_events = 1000;
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 		/* will enable all once we are ready */
1055*4882a593Smuzhiyun 		attr->disabled = 1;
1056*4882a593Smuzhiyun 	}
1057*4882a593Smuzhiyun 
1058*4882a593Smuzhiyun 	err = evlist__open(evlist);
1059*4882a593Smuzhiyun 	if (err < 0) {
1060*4882a593Smuzhiyun 		printf("Couldn't create the events: %s\n",
1061*4882a593Smuzhiyun 		       str_error_r(errno, sbuf, sizeof(sbuf)));
1062*4882a593Smuzhiyun 		goto out;
1063*4882a593Smuzhiyun 	}
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
1066*4882a593Smuzhiyun 		ui__error("Failed to mmap the events: %s\n",
1067*4882a593Smuzhiyun 			  str_error_r(errno, sbuf, sizeof(sbuf)));
1068*4882a593Smuzhiyun 		evlist__close(evlist);
1069*4882a593Smuzhiyun 		goto out;
1070*4882a593Smuzhiyun 	}
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	rc = 0;
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun out:
1075*4882a593Smuzhiyun 	return rc;
1076*4882a593Smuzhiyun }
1077*4882a593Smuzhiyun #endif
1078*4882a593Smuzhiyun 
/*
 * Report-mode input path: open the recorded data file named by
 * kvm->file_name, select the arch exit-reason decoding, and replay all
 * events through process_sample_event() to build the statistics.
 * Returns 0 on success or a negative error code.
 */
static int read_events(struct perf_kvm_stat *kvm)
{
	int ret;

	/* callbacks for replay; ordered_events keeps samples in time order */
	struct perf_tool eops = {
		.sample			= process_sample_event,
		.comm			= perf_event__process_comm,
		.namespaces		= perf_event__process_namespaces,
		.ordered_events		= true,
	};
	struct perf_data file = {
		.path  = kvm->file_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = kvm->force,
	};

	kvm->tool = eops;
	kvm->session = perf_session__new(&file, false, &kvm->tool);
	if (IS_ERR(kvm->session)) {
		pr_err("Initializing perf session failed\n");
		return PTR_ERR(kvm->session);
	}

	symbol__init(&kvm->session->header.env);

	/* a data file without tracepoints is useless for kvm stat */
	if (!perf_session__has_traces(kvm->session, "kvm record")) {
		ret = -EINVAL;
		goto out_delete;
	}

	/*
	 * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
	 * traced in the old kernel.
	 */
	ret = cpu_isa_config(kvm);
	if (ret < 0)
		goto out_delete;

	ret = perf_session__process_events(kvm->session);

out_delete:
	perf_session__delete(kvm->session);
	return ret;
}
1123*4882a593Smuzhiyun 
parse_target_str(struct perf_kvm_stat * kvm)1124*4882a593Smuzhiyun static int parse_target_str(struct perf_kvm_stat *kvm)
1125*4882a593Smuzhiyun {
1126*4882a593Smuzhiyun 	if (kvm->opts.target.pid) {
1127*4882a593Smuzhiyun 		kvm->pid_list = intlist__new(kvm->opts.target.pid);
1128*4882a593Smuzhiyun 		if (kvm->pid_list == NULL) {
1129*4882a593Smuzhiyun 			pr_err("Error parsing process id string\n");
1130*4882a593Smuzhiyun 			return -EINVAL;
1131*4882a593Smuzhiyun 		}
1132*4882a593Smuzhiyun 	}
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 	return 0;
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun 
kvm_events_report_vcpu(struct perf_kvm_stat * kvm)1137*4882a593Smuzhiyun static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
1138*4882a593Smuzhiyun {
1139*4882a593Smuzhiyun 	int ret = -EINVAL;
1140*4882a593Smuzhiyun 	int vcpu = kvm->trace_vcpu;
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	if (parse_target_str(kvm) != 0)
1143*4882a593Smuzhiyun 		goto exit;
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	if (!verify_vcpu(vcpu))
1146*4882a593Smuzhiyun 		goto exit;
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	if (!select_key(kvm))
1149*4882a593Smuzhiyun 		goto exit;
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	if (!register_kvm_events_ops(kvm))
1152*4882a593Smuzhiyun 		goto exit;
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 	init_kvm_event_record(kvm);
1155*4882a593Smuzhiyun 	setup_pager();
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	ret = read_events(kvm);
1158*4882a593Smuzhiyun 	if (ret)
1159*4882a593Smuzhiyun 		goto exit;
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	sort_result(kvm);
1162*4882a593Smuzhiyun 	print_result(kvm);
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun exit:
1165*4882a593Smuzhiyun 	return ret;
1166*4882a593Smuzhiyun }
1167*4882a593Smuzhiyun 
/*
 * Duplicate string s; on allocation failure this returns -ENOMEM from
 * the *enclosing* function (GNU statement-expression — the hidden
 * 'return' is intentional).
 * NOTE(review): that early return leaks whatever the caller allocated
 * so far (e.g. rec_argv) — tolerated since the process exits shortly.
 */
#define STRDUP_FAIL_EXIT(s)		\
	({	char *_p;		\
	_p = strdup(s);		\
		if (!_p)		\
			return -ENOMEM;	\
		_p;			\
	})
1175*4882a593Smuzhiyun 
/*
 * Weak default: architectures may override this to set up extra kvm
 * tracepoints before recording; the generic version has nothing to do.
 */
int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused)
{
	return 0;
}
1180*4882a593Smuzhiyun 
/*
 * "perf kvm stat record": build an argv for 'perf record' with
 * "-e <tp>" for every kvm tracepoint plus "-o <file>", forward the
 * user's remaining arguments, hide/disable record options that make no
 * sense for kvm stat, and hand off to cmd_record().
 *
 * NOTE(review): STRDUP_FAIL_EXIT returns -ENOMEM from here on OOM,
 * leaking rec_argv and earlier strdup()s; rec_argv is also never freed
 * on success — tolerated because the process runs one subcommand.
 */
static int
kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
	unsigned int rec_argc, i, j, events_tp_size;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-R",
		"-m", "1024",
		"-c", "1",
	};
	const char * const kvm_stat_record_usage[] = {
		"perf kvm stat record [<options>]",
		NULL
	};
	const char * const *events_tp;
	int ret;

	events_tp_size = 0;
	ret = setup_kvm_events_tp(kvm);
	if (ret < 0) {
		pr_err("Unable to setup the kvm tracepoints\n");
		return ret;
	}

	/* kvm_events_tp is a NULL-terminated array of tracepoint names */
	for (events_tp = kvm_events_tp; *events_tp; events_tp++)
		events_tp_size++;

	/* fixed args + user args + "-o <file>" + one "-e <tp>" pair each */
	rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
		   2 * events_tp_size;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);

	for (j = 0; j < events_tp_size; j++) {
		rec_argv[i++] = "-e";
		rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
	}

	rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
	rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);

	/* forward the user's own arguments (argv[0] is the subcommand) */
	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	/* event selection is fixed above; hide these knobs from --help */
	set_option_flag(record_options, 'e', "event", PARSE_OPT_HIDDEN);
	set_option_flag(record_options, 0, "filter", PARSE_OPT_HIDDEN);
	set_option_flag(record_options, 'R', "raw-samples", PARSE_OPT_HIDDEN);

	/* these record options conflict with kvm stat's fixed setup */
	set_option_flag(record_options, 'F', "freq", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 0, "group", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'g', NULL, PARSE_OPT_DISABLED);
	set_option_flag(record_options, 0, "call-graph", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'd', "data", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'T', "timestamp", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'P', "period", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'n', "no-samples", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'N', "no-buildid-cache", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'B', "no-buildid", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'G', "cgroup", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'b', "branch-any", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'j', "branch-filter", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'W', "weight", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 0, "transaction", PARSE_OPT_DISABLED);

	record_usage = kvm_stat_record_usage;
	return cmd_record(i, rec_argv);
}
1253*4882a593Smuzhiyun 
/*
 * "perf kvm stat report": parse the report-specific options and print
 * statistics from the recorded data file. Without -p, events from the
 * whole system are reported.
 */
static int
kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
	const struct option kvm_events_report_options[] = {
		OPT_STRING(0, "event", &kvm->report_event, "report event",
			   "event for reporting: vmexit, "
			   "mmio (x86 only), ioport (x86 only)"),
		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
			    "vcpu id to report"),
		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
			    "key for sorting: sample(sort by samples number)"
			    " time (sort by avg time)"),
		OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
			   "analyze events only for given process id(s)"),
		OPT_BOOLEAN('f', "force", &kvm->force, "don't complain, do it"),
		OPT_END()
	};

	const char * const kvm_events_report_usage[] = {
		"perf kvm stat report [<options>]",
		NULL
	};

	if (argc) {
		argc = parse_options(argc, argv,
				     kvm_events_report_options,
				     kvm_events_report_usage, 0);
		/* any leftover argument is an error */
		if (argc)
			usage_with_options(kvm_events_report_usage,
					   kvm_events_report_options);
	}

	/* no pid filter given: report events from the whole system */
	if (!kvm->opts.target.pid)
		kvm->opts.target.system_wide = true;

	return kvm_events_report_vcpu(kvm);
}
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun #ifdef HAVE_TIMERFD_SUPPORT
kvm_live_event_list(void)1293*4882a593Smuzhiyun static struct evlist *kvm_live_event_list(void)
1294*4882a593Smuzhiyun {
1295*4882a593Smuzhiyun 	struct evlist *evlist;
1296*4882a593Smuzhiyun 	char *tp, *name, *sys;
1297*4882a593Smuzhiyun 	int err = -1;
1298*4882a593Smuzhiyun 	const char * const *events_tp;
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	evlist = evlist__new();
1301*4882a593Smuzhiyun 	if (evlist == NULL)
1302*4882a593Smuzhiyun 		return NULL;
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	for (events_tp = kvm_events_tp; *events_tp; events_tp++) {
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 		tp = strdup(*events_tp);
1307*4882a593Smuzhiyun 		if (tp == NULL)
1308*4882a593Smuzhiyun 			goto out;
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 		/* split tracepoint into subsystem and name */
1311*4882a593Smuzhiyun 		sys = tp;
1312*4882a593Smuzhiyun 		name = strchr(tp, ':');
1313*4882a593Smuzhiyun 		if (name == NULL) {
1314*4882a593Smuzhiyun 			pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
1315*4882a593Smuzhiyun 			       *events_tp);
1316*4882a593Smuzhiyun 			free(tp);
1317*4882a593Smuzhiyun 			goto out;
1318*4882a593Smuzhiyun 		}
1319*4882a593Smuzhiyun 		*name = '\0';
1320*4882a593Smuzhiyun 		name++;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 		if (evlist__add_newtp(evlist, sys, name, NULL)) {
1323*4882a593Smuzhiyun 			pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
1324*4882a593Smuzhiyun 			free(tp);
1325*4882a593Smuzhiyun 			goto out;
1326*4882a593Smuzhiyun 		}
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 		free(tp);
1329*4882a593Smuzhiyun 	}
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	err = 0;
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun out:
1334*4882a593Smuzhiyun 	if (err) {
1335*4882a593Smuzhiyun 		evlist__delete(evlist);
1336*4882a593Smuzhiyun 		evlist = NULL;
1337*4882a593Smuzhiyun 	}
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	return evlist;
1340*4882a593Smuzhiyun }
1341*4882a593Smuzhiyun 
kvm_events_live(struct perf_kvm_stat * kvm,int argc,const char ** argv)1342*4882a593Smuzhiyun static int kvm_events_live(struct perf_kvm_stat *kvm,
1343*4882a593Smuzhiyun 			   int argc, const char **argv)
1344*4882a593Smuzhiyun {
1345*4882a593Smuzhiyun 	char errbuf[BUFSIZ];
1346*4882a593Smuzhiyun 	int err;
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	const struct option live_options[] = {
1349*4882a593Smuzhiyun 		OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
1350*4882a593Smuzhiyun 			"record events on existing process id"),
1351*4882a593Smuzhiyun 		OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages",
1352*4882a593Smuzhiyun 			"number of mmap data pages",
1353*4882a593Smuzhiyun 			perf_evlist__parse_mmap_pages),
1354*4882a593Smuzhiyun 		OPT_INCR('v', "verbose", &verbose,
1355*4882a593Smuzhiyun 			"be more verbose (show counter open errors, etc)"),
1356*4882a593Smuzhiyun 		OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
1357*4882a593Smuzhiyun 			"system-wide collection from all CPUs"),
1358*4882a593Smuzhiyun 		OPT_UINTEGER('d', "display", &kvm->display_time,
1359*4882a593Smuzhiyun 			"time in seconds between display updates"),
1360*4882a593Smuzhiyun 		OPT_STRING(0, "event", &kvm->report_event, "report event",
1361*4882a593Smuzhiyun 			"event for reporting: "
1362*4882a593Smuzhiyun 			"vmexit, mmio (x86 only), ioport (x86 only)"),
1363*4882a593Smuzhiyun 		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
1364*4882a593Smuzhiyun 			"vcpu id to report"),
1365*4882a593Smuzhiyun 		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
1366*4882a593Smuzhiyun 			"key for sorting: sample(sort by samples number)"
1367*4882a593Smuzhiyun 			" time (sort by avg time)"),
1368*4882a593Smuzhiyun 		OPT_U64(0, "duration", &kvm->duration,
1369*4882a593Smuzhiyun 			"show events other than"
1370*4882a593Smuzhiyun 			" HLT (x86 only) or Wait state (s390 only)"
1371*4882a593Smuzhiyun 			" that take longer than duration usecs"),
1372*4882a593Smuzhiyun 		OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
1373*4882a593Smuzhiyun 				"per thread proc mmap processing timeout in ms"),
1374*4882a593Smuzhiyun 		OPT_END()
1375*4882a593Smuzhiyun 	};
1376*4882a593Smuzhiyun 	const char * const live_usage[] = {
1377*4882a593Smuzhiyun 		"perf kvm stat live [<options>]",
1378*4882a593Smuzhiyun 		NULL
1379*4882a593Smuzhiyun 	};
1380*4882a593Smuzhiyun 	struct perf_data data = {
1381*4882a593Smuzhiyun 		.mode = PERF_DATA_MODE_WRITE,
1382*4882a593Smuzhiyun 	};
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 	/* event handling */
1386*4882a593Smuzhiyun 	kvm->tool.sample = process_sample_event;
1387*4882a593Smuzhiyun 	kvm->tool.comm   = perf_event__process_comm;
1388*4882a593Smuzhiyun 	kvm->tool.exit   = perf_event__process_exit;
1389*4882a593Smuzhiyun 	kvm->tool.fork   = perf_event__process_fork;
1390*4882a593Smuzhiyun 	kvm->tool.lost   = process_lost_event;
1391*4882a593Smuzhiyun 	kvm->tool.namespaces  = perf_event__process_namespaces;
1392*4882a593Smuzhiyun 	kvm->tool.ordered_events = true;
1393*4882a593Smuzhiyun 	perf_tool__fill_defaults(&kvm->tool);
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	/* set defaults */
1396*4882a593Smuzhiyun 	kvm->display_time = 1;
1397*4882a593Smuzhiyun 	kvm->opts.user_interval = 1;
1398*4882a593Smuzhiyun 	kvm->opts.mmap_pages = 512;
1399*4882a593Smuzhiyun 	kvm->opts.target.uses_mmap = false;
1400*4882a593Smuzhiyun 	kvm->opts.target.uid_str = NULL;
1401*4882a593Smuzhiyun 	kvm->opts.target.uid = UINT_MAX;
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	symbol__init(NULL);
1404*4882a593Smuzhiyun 	disable_buildid_cache();
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	use_browser = 0;
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	if (argc) {
1409*4882a593Smuzhiyun 		argc = parse_options(argc, argv, live_options,
1410*4882a593Smuzhiyun 				     live_usage, 0);
1411*4882a593Smuzhiyun 		if (argc)
1412*4882a593Smuzhiyun 			usage_with_options(live_usage, live_options);
1413*4882a593Smuzhiyun 	}
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun 	kvm->duration *= NSEC_PER_USEC;   /* convert usec to nsec */
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	/*
1418*4882a593Smuzhiyun 	 * target related setups
1419*4882a593Smuzhiyun 	 */
1420*4882a593Smuzhiyun 	err = target__validate(&kvm->opts.target);
1421*4882a593Smuzhiyun 	if (err) {
1422*4882a593Smuzhiyun 		target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
1423*4882a593Smuzhiyun 		ui__warning("%s", errbuf);
1424*4882a593Smuzhiyun 	}
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	if (target__none(&kvm->opts.target))
1427*4882a593Smuzhiyun 		kvm->opts.target.system_wide = true;
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 	/*
1431*4882a593Smuzhiyun 	 * generate the event list
1432*4882a593Smuzhiyun 	 */
1433*4882a593Smuzhiyun 	err = setup_kvm_events_tp(kvm);
1434*4882a593Smuzhiyun 	if (err < 0) {
1435*4882a593Smuzhiyun 		pr_err("Unable to setup the kvm tracepoints\n");
1436*4882a593Smuzhiyun 		return err;
1437*4882a593Smuzhiyun 	}
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	kvm->evlist = kvm_live_event_list();
1440*4882a593Smuzhiyun 	if (kvm->evlist == NULL) {
1441*4882a593Smuzhiyun 		err = -1;
1442*4882a593Smuzhiyun 		goto out;
1443*4882a593Smuzhiyun 	}
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun 	if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
1446*4882a593Smuzhiyun 		usage_with_options(live_usage, live_options);
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	/*
1449*4882a593Smuzhiyun 	 * perf session
1450*4882a593Smuzhiyun 	 */
1451*4882a593Smuzhiyun 	kvm->session = perf_session__new(&data, false, &kvm->tool);
1452*4882a593Smuzhiyun 	if (IS_ERR(kvm->session)) {
1453*4882a593Smuzhiyun 		err = PTR_ERR(kvm->session);
1454*4882a593Smuzhiyun 		goto out;
1455*4882a593Smuzhiyun 	}
1456*4882a593Smuzhiyun 	kvm->session->evlist = kvm->evlist;
1457*4882a593Smuzhiyun 	perf_session__set_id_hdr_size(kvm->session);
1458*4882a593Smuzhiyun 	ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
1459*4882a593Smuzhiyun 	machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
1460*4882a593Smuzhiyun 				    kvm->evlist->core.threads, false, 1);
1461*4882a593Smuzhiyun 	err = kvm_live_open_events(kvm);
1462*4882a593Smuzhiyun 	if (err)
1463*4882a593Smuzhiyun 		goto out;
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	err = kvm_events_live_report(kvm);
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun out:
1468*4882a593Smuzhiyun 	perf_session__delete(kvm->session);
1469*4882a593Smuzhiyun 	kvm->session = NULL;
1470*4882a593Smuzhiyun 	evlist__delete(kvm->evlist);
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	return err;
1473*4882a593Smuzhiyun }
1474*4882a593Smuzhiyun #endif
1475*4882a593Smuzhiyun 
/* Print the help text for 'perf kvm stat' to stdout. */
static void print_kvm_stat_usage(void)
{
	static const char * const usage_lines[] = {
		"Usage: perf kvm stat <command>\n\n",
		"# Available commands:\n",
		"\trecord: record kvm events\n",
		"\treport: report statistical data of kvm events\n",
		"\tlive:   live reporting of statistical data of kvm events\n",
		"\nOtherwise, it is the alias of 'perf stat':\n",
	};
	size_t i;

	for (i = 0; i < sizeof(usage_lines) / sizeof(usage_lines[0]); i++)
		printf("%s", usage_lines[i]);
}
1487*4882a593Smuzhiyun 
kvm_cmd_stat(const char * file_name,int argc,const char ** argv)1488*4882a593Smuzhiyun static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
1489*4882a593Smuzhiyun {
1490*4882a593Smuzhiyun 	struct perf_kvm_stat kvm = {
1491*4882a593Smuzhiyun 		.file_name = file_name,
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 		.trace_vcpu	= -1,
1494*4882a593Smuzhiyun 		.report_event	= "vmexit",
1495*4882a593Smuzhiyun 		.sort_key	= "sample",
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	};
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 	if (argc == 1) {
1500*4882a593Smuzhiyun 		print_kvm_stat_usage();
1501*4882a593Smuzhiyun 		goto perf_stat;
1502*4882a593Smuzhiyun 	}
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	if (!strncmp(argv[1], "rec", 3))
1505*4882a593Smuzhiyun 		return kvm_events_record(&kvm, argc - 1, argv + 1);
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 	if (!strncmp(argv[1], "rep", 3))
1508*4882a593Smuzhiyun 		return kvm_events_report(&kvm, argc - 1 , argv + 1);
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun #ifdef HAVE_TIMERFD_SUPPORT
1511*4882a593Smuzhiyun 	if (!strncmp(argv[1], "live", 4))
1512*4882a593Smuzhiyun 		return kvm_events_live(&kvm, argc - 1 , argv + 1);
1513*4882a593Smuzhiyun #endif
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun perf_stat:
1516*4882a593Smuzhiyun 	return cmd_stat(argc, argv);
1517*4882a593Smuzhiyun }
1518*4882a593Smuzhiyun #endif /* HAVE_KVM_STAT_SUPPORT */
1519*4882a593Smuzhiyun 
/*
 * Weak default for the arch-specific hook that may append extra default
 * events to the 'perf kvm record' command line (the __weak linkage lets
 * an architecture provide its own definition).  This fallback changes
 * nothing and reports success.
 */
int __weak kvm_add_default_arch_event(int *argc __maybe_unused,
					const char **argv __maybe_unused)
{
	return 0;
}
1525*4882a593Smuzhiyun 
/*
 * Run 'perf record -o <file_name> ...', forwarding the user's remaining
 * arguments after letting the architecture append its default events.
 *
 * Returns cmd_record()'s result, or a negative error code on failure.
 */
static int __cmd_record(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j, ret;
	const char **rec_argv;

	ret = kvm_add_default_arch_event(&argc, argv);
	if (ret)
		return -EINVAL;

	/* "record", "-o", file_name plus argv[1..argc-1] => argc + 2 slots */
	rec_argc = argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;
	rec_argv[i++] = strdup("record");
	rec_argv[i++] = strdup("-o");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv);
}
1547*4882a593Smuzhiyun 
/*
 * Run 'perf report -i <file_name> ...', forwarding the user's remaining
 * arguments.
 *
 * Returns cmd_report()'s result, or -ENOMEM on allocation failure.
 */
static int __cmd_report(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j;
	const char **rec_argv;

	/* "report", "-i", file_name plus argv[1..argc-1] => argc + 2 slots */
	rec_argc = argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;
	rec_argv[i++] = strdup("report");
	rec_argv[i++] = strdup("-i");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_report(i, rec_argv);
}
1565*4882a593Smuzhiyun 
/*
 * Run 'perf buildid-list -i <file_name> ...', forwarding the user's
 * remaining arguments.
 *
 * Returns cmd_buildid_list()'s result, or -ENOMEM on allocation failure.
 */
static int
__cmd_buildid_list(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j;
	const char **rec_argv;

	/* "buildid-list", "-i", file_name plus argv[1..argc-1] */
	rec_argc = argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;
	rec_argv[i++] = strdup("buildid-list");
	rec_argv[i++] = strdup("-i");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_buildid_list(i, rec_argv);
}
1584*4882a593Smuzhiyun 
cmd_kvm(int argc,const char ** argv)1585*4882a593Smuzhiyun int cmd_kvm(int argc, const char **argv)
1586*4882a593Smuzhiyun {
1587*4882a593Smuzhiyun 	const char *file_name = NULL;
1588*4882a593Smuzhiyun 	const struct option kvm_options[] = {
1589*4882a593Smuzhiyun 		OPT_STRING('i', "input", &file_name, "file",
1590*4882a593Smuzhiyun 			   "Input file name"),
1591*4882a593Smuzhiyun 		OPT_STRING('o', "output", &file_name, "file",
1592*4882a593Smuzhiyun 			   "Output file name"),
1593*4882a593Smuzhiyun 		OPT_BOOLEAN(0, "guest", &perf_guest,
1594*4882a593Smuzhiyun 			    "Collect guest os data"),
1595*4882a593Smuzhiyun 		OPT_BOOLEAN(0, "host", &perf_host,
1596*4882a593Smuzhiyun 			    "Collect host os data"),
1597*4882a593Smuzhiyun 		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
1598*4882a593Smuzhiyun 			   "guest mount directory under which every guest os"
1599*4882a593Smuzhiyun 			   " instance has a subdir"),
1600*4882a593Smuzhiyun 		OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
1601*4882a593Smuzhiyun 			   "file", "file saving guest os vmlinux"),
1602*4882a593Smuzhiyun 		OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
1603*4882a593Smuzhiyun 			   "file", "file saving guest os /proc/kallsyms"),
1604*4882a593Smuzhiyun 		OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
1605*4882a593Smuzhiyun 			   "file", "file saving guest os /proc/modules"),
1606*4882a593Smuzhiyun 		OPT_INCR('v', "verbose", &verbose,
1607*4882a593Smuzhiyun 			    "be more verbose (show counter open errors, etc)"),
1608*4882a593Smuzhiyun 		OPT_END()
1609*4882a593Smuzhiyun 	};
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 	const char *const kvm_subcommands[] = { "top", "record", "report", "diff",
1612*4882a593Smuzhiyun 						"buildid-list", "stat", NULL };
1613*4882a593Smuzhiyun 	const char *kvm_usage[] = { NULL, NULL };
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	perf_host  = 0;
1616*4882a593Smuzhiyun 	perf_guest = 1;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage,
1619*4882a593Smuzhiyun 					PARSE_OPT_STOP_AT_NON_OPTION);
1620*4882a593Smuzhiyun 	if (!argc)
1621*4882a593Smuzhiyun 		usage_with_options(kvm_usage, kvm_options);
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	if (!perf_host)
1624*4882a593Smuzhiyun 		perf_guest = 1;
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	if (!file_name) {
1627*4882a593Smuzhiyun 		file_name = get_filename_for_perf_kvm();
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 		if (!file_name) {
1630*4882a593Smuzhiyun 			pr_err("Failed to allocate memory for filename\n");
1631*4882a593Smuzhiyun 			return -ENOMEM;
1632*4882a593Smuzhiyun 		}
1633*4882a593Smuzhiyun 	}
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 	if (!strncmp(argv[0], "rec", 3))
1636*4882a593Smuzhiyun 		return __cmd_record(file_name, argc, argv);
1637*4882a593Smuzhiyun 	else if (!strncmp(argv[0], "rep", 3))
1638*4882a593Smuzhiyun 		return __cmd_report(file_name, argc, argv);
1639*4882a593Smuzhiyun 	else if (!strncmp(argv[0], "diff", 4))
1640*4882a593Smuzhiyun 		return cmd_diff(argc, argv);
1641*4882a593Smuzhiyun 	else if (!strncmp(argv[0], "top", 3))
1642*4882a593Smuzhiyun 		return cmd_top(argc, argv);
1643*4882a593Smuzhiyun 	else if (!strncmp(argv[0], "buildid-list", 12))
1644*4882a593Smuzhiyun 		return __cmd_buildid_list(file_name, argc, argv);
1645*4882a593Smuzhiyun #ifdef HAVE_KVM_STAT_SUPPORT
1646*4882a593Smuzhiyun 	else if (!strncmp(argv[0], "stat", 4))
1647*4882a593Smuzhiyun 		return kvm_cmd_stat(file_name, argc, argv);
1648*4882a593Smuzhiyun #endif
1649*4882a593Smuzhiyun 	else
1650*4882a593Smuzhiyun 		usage_with_options(kvm_usage, kvm_options);
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	return 0;
1653*4882a593Smuzhiyun }
1654